import os
import sys
with open(sys.argv[0]) as f:
code = f.read() # read the code of this file ASAP, for logging
import uuid
import glob
import time
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
# Use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import flex_attention, create_block_mask
flex_attention = torch.compile(flex_attention, dynamic=False)
create_block_mask = torch.compile(create_block_mask, dynamic=False)
# -----------------------------------------------------------------------------
# Muon optimizer
def zeropower_via_svd(G, steps=None):
U, S, V = G.svd()
return U @ V.T
@torch.compile
def zeropower_via_newtonschulz5(G, steps=10, eps=1e-7):
"""
Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
zero even beyond the point where the iteration no longer converges all the way to one everywhere
on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
performance at all relative to UV^T, where USV^T = G is the SVD.
"""
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
X /= (X.norm() + eps) # ensure top singular value <= 1
if G.size(0) > G.size(1):
X = X.T
for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A # adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
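# A hedged sanity-check sketch (illustrative only, not part of the training run): per the
# docstring, X behaves like U S' V^T with S'_{ii} ~ Uniform(0.5, 1.5), so X^T X = V S'^2 V^T
# should have eigenvalues roughly in [0.25, 2.25]:
#
#   G = torch.randn(256, 128, device='cuda')
#   X = zeropower_via_newtonschulz5(G, steps=5)
#   print(torch.linalg.eigvalsh(X.float().T @ X.float()))  # expect values ~[0.25, 2.25]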
zeropower_backends = dict(svd=zeropower_via_svd, newtonschulz5=zeropower_via_newtonschulz5)
class Muon(torch.optim.Optimizer):
"""
Muon - MomentUm Orthogonalized by Newton-schulz
Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
the advantage that it can be stably run in bfloat16 on the GPU.
Some warnings:
- This optimizer assumes that all parameters passed in are 2D.
- It should not be used for the embedding layer, the final fully connected layer, or any {0,1}-D
parameters; those should all be optimized by a standard method (e.g., AdamW).
- To use it with 4D convolutional filters, it works well to just flatten their last 3 dimensions.
- We believe it is unlikely to work well for training with small batch size.
- We believe it may not work well for finetuning pretrained models, but we haven't tested this.
- We have not yet tried this optimizer for training scenarios larger than NanoGPT (124M).
Arguments:
lr: The learning rate used by the internal SGD.
momentum: The momentum used by the internal SGD.
nesterov: Whether to use Nesterov-style momentum in the internal SGD. (recommended)
backend: The chosen backend for the orthogonalization step. (recommended: 'newtonschulz5')
backend_steps: The number of iteration steps to use in the backend, if it is iterative.
"""
def __init__(self, params, lr=0.02, momentum=0.95, nesterov=True,
backend='newtonschulz5', backend_steps=5):
defaults = dict(lr=lr, momentum=momentum, nesterov=nesterov, backend=backend, backend_steps=backend_steps)
super().__init__(params, defaults)
def step(self):
for group in self.param_groups:
lr = group['lr']
momentum = group['momentum']
zeropower_backend = zeropower_backends[group['backend']]
# generate weight updates in distributed fashion
total_params = sum(p.numel() for p in group['params'])
updates_flat = torch.zeros(total_params, device='cuda', dtype=torch.bfloat16)
curr_idx = 0
for i, p in enumerate(group['params']):
# luckily this will perfectly distribute a transformer with a multiple of 4 layers across 8 GPUs
if i % int(os.environ['WORLD_SIZE']) == int(os.environ['RANK']):
g = p.grad
assert g is not None
state = self.state[p]
if 'momentum_buffer' not in state:
state['momentum_buffer'] = torch.zeros_like(g)
buf = state['momentum_buffer']
buf.mul_(momentum).add_(g)
g = g.add(buf, alpha=momentum) if group['nesterov'] else buf
g = zeropower_backend(g, steps=group['backend_steps'])
g *= max(1, g.size(0)/g.size(1))**0.5
updates_flat[curr_idx:curr_idx+p.numel()] = g.flatten()
curr_idx += p.numel()
# sync updates across devices. we are not memory-constrained so we can do this simple deserialization
dist.all_reduce(updates_flat, op=dist.ReduceOp.SUM)
# deserialize and apply updates
curr_idx = 0
for p in group['params']:
g = updates_flat[curr_idx:curr_idx+p.numel()].view_as(p.data).type_as(p.data)
p.data.add_(g, alpha=-lr)
curr_idx += p.numel()
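# Hedged usage sketch (illustrative; the actual setup further below passes the transformer
# blocks' 2D weight matrices with lr=0.05, momentum=0.95):
#
#   matrix_params = [p for p in model.parameters() if p.ndim == 2]
#   opt = Muon(matrix_params, lr=0.05, momentum=0.95)
#   loss.backward(); opt.step(); model.zero_grad(set_to_none=True)
#
# Note that step() all-reduces the flat update buffer, so it assumes a distributed process
# group is initialized and the RANK/WORLD_SIZE env vars are set (torchrun does both).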
# -----------------------------------------------------------------------------
# PyTorch nn.Module definitions for the GPT-2 model
def norm(x):
return F.rms_norm(x, (x.size(-1),))
class CastedLinear(nn.Linear):
def __init__(self, in_features, out_features):
super().__init__(in_features, out_features, bias=False)
def forward(self, x):
return F.linear(x, self.weight.to(x.dtype))
class Rotary(torch.nn.Module):
def __init__(self, dim, base=10000):
super().__init__()
self.register_buffer('inv_freq', (1 / base) ** (torch.arange(0, dim, 2) / dim))
self.seq_len_cached = None
self.cos_cached = None
self.sin_cached = None
def forward(self, x):
seq_len = x.shape[1]
if seq_len != self.seq_len_cached:
t = torch.arange(seq_len, device=x.device)
freqs = torch.outer(t, self.inv_freq)
self.seq_len_cached = seq_len
self.cos_cached = freqs.cos()
self.sin_cached = freqs.sin()
cos, sin = self.cos_cached[None, :, None, :], self.sin_cached[None, :, None, :]
# apply_rotary_emb(x, cos, sin)
x1, x2 = x.chunk(2, dim=3)
y1 = x1 * cos + x2 * sin
y2 = x1 * (-sin) + x2 * cos
return torch.cat((y1, y2), 3).type_as(x)
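# Shape sketch (assumed from the call sites below): x is (B, T, n_head, head_dim); each
# (x1, x2) pair across the halved head_dim is rotated by a position-dependent angle, which
# preserves the norm of the pair:
#
#   rotary = Rotary(128)               # head_dim = 128
#   q = torch.randn(1, 64, 6, 128)
#   assert rotary(q).shape == q.shape  # same shape, positions now encoded in the rotation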
class CausalSelfAttention(nn.Module):
def __init__(self, dim, n_head):
super().__init__()
assert dim % n_head == 0
self.n_head = n_head
self.c_q = CastedLinear(dim, dim)
self.c_k = CastedLinear(dim, dim)
self.c_v = CastedLinear(dim, dim)
# value residual lambda
self.lamb = nn.Parameter(torch.tensor(0.5)) # @Grad62304977
# rotary embeddings
self.rotary = Rotary(dim // n_head) # dim // n_head = head_dim
# output projection
self.c_proj = CastedLinear(dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x, vi, block_mask):
B, T = x.size(0), x.size(1) # batch size, sequence length
assert B == 1, "Must use batch size = 1 for FlexAttention"
q = self.c_q(x).view(B, T, self.n_head, -1)
k = self.c_k(x).view(B, T, self.n_head, -1)
v = self.c_v(x).view(B, T, self.n_head, -1)
v = (1 - self.lamb) * v + self.lamb * vi.view_as(v) # @Grad62304977
q, k = norm(q), norm(k) # QK norm suggested by @Grad62304977
q, k = self.rotary(q), self.rotary(k)
y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask)
y = y.transpose(1, 2).contiguous().view_as(x) # re-assemble all head outputs side by side
y = self.c_proj(y)
return y
class MLP(nn.Module):
def __init__(self, dim):
super().__init__()
self.c_fc = CastedLinear(dim, 4 * dim)
self.c_proj = CastedLinear(4 * dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x):
x = self.c_fc(x)
x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
x = self.c_proj(x)
return x
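# The squared ReLU above computes x -> max(x, 0)^2 (https://arxiv.org/abs/2109.08668v2);
# e.g. F.relu(torch.tensor([-1., 2.])).square() gives tensor([0., 4.]).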
class Block(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = CausalSelfAttention(config.n_embd, config.n_head)
self.mlp = MLP(config.n_embd)
self.lambdas = nn.Parameter(torch.tensor([1., 0.]))
def forward(self, x, vi, x0, block_mask):
x = self.lambdas[0] * x + self.lambdas[1] * x0
x = x + self.attn(norm(x), vi, block_mask)
x = x + self.mlp(norm(x))
return x
# -----------------------------------------------------------------------------
# The main GPT-2 model
@dataclass
class GPTConfig:
vocab_size : int = 50304
n_layer : int = 12
n_head : int = 6 # head dim 128 suggested by @Grad62304977
n_embd : int = 768
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
# U-net design by @brendanh0gan
self.num_encoder_layers = config.n_layer // 2 # Half of the layers for encoder
self.num_decoder_layers = config.n_layer - self.num_encoder_layers # Remaining for decoder
# Add learnable skip connection weights for decoder layers
self.skip_weights = nn.Parameter(torch.ones(self.num_decoder_layers))
self.transformer = nn.ModuleDict(dict(
wte = nn.Embedding(config.vocab_size, config.n_embd),
# token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual learning
vte = nn.Embedding(config.vocab_size, config.n_embd*12),
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
))
self.lm_head = CastedLinear(config.n_embd, config.vocab_size)
self.lm_head.weight.data.zero_() # @Grad62304977
def forward(self, idx, target, attn_blocksize):
docs = (idx == 50256).cumsum(0)
def document_causal_mask(b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
document_mask = docs[q_idx] == docs[kv_idx]
window_mask = q_idx - kv_idx < attn_blocksize
return causal_mask & document_mask & window_mask
S = len(idx)
block_mask = create_block_mask(document_causal_mask, None, None, S, S, device="cuda", _compile=True)
# forward the GPT model itself
x = self.transformer.wte(idx[None]) # token embeddings of shape (b, t, n_embd)
x = norm(x) # @Grad62304977
x0 = x
vi = self.transformer.vte(idx[None]).chunk(12, dim=-1)
# Store outputs for U-Net skip connections
skip_connections = []
# Encoder pass - process only the first half of the blocks
for i in range(self.num_encoder_layers):
x = self.transformer.h[i](x, vi[i], x0, block_mask)
skip_connections.append(x)
# Decoder pass - process the remaining blocks with weighted skip connections
for i in range(self.num_decoder_layers):
x = x + self.skip_weights[i] * skip_connections.pop()
x = self.transformer.h[self.num_encoder_layers + i](x, vi[self.num_encoder_layers+i], x0, block_mask)
x = norm(x)
logits = self.lm_head(x)
logits = 30 * torch.tanh(logits / 30) # @Grad62304977
logits = logits.float()
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target.view(-1))
return loss
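# Mask semantics sketch: 50256 is the GPT-2 <|endoftext|> token, so docs[i] counts the
# document boundaries at or before position i. Query q may attend key k iff q >= k (causal),
# docs[q] == docs[k] (no attention across document boundaries), and q - k < attn_blocksize
# (sliding window). E.g. with idx = [a, b, 50256, c], token c attends only to 50256 and
# itself, never to a or b.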
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _peek_data_shard(filename):
# only reads the header, returns header data
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
if header[0] != 20240520:
print("ERROR: magic number mismatch in the data .bin file!")
print("---> HINT: Are you passing in a correct file with --input_bin?")
print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
exit(1)
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
return ntok # for now just return the number of tokens
def _load_data_shard(filename):
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
assert header[0] == 20240520, "magic number mismatch in the data .bin file"
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
# the rest of it are tokens, stored as uint16
tokens = np.frombuffer(f.read(), dtype=np.uint16)
assert len(tokens) == ntok, "number of tokens read does not match header?"
return tokens
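# Shard format (as parsed above): a 256-int32 header -- header[0] = 20240520 (magic),
# header[1] = 1 (version), header[2] = token count -- followed by the tokens as uint16.
# A hedged sketch of writing a tiny compatible shard for testing:
#
#   toks = np.array([1, 2, 3], dtype=np.uint16)
#   header = np.zeros(256, dtype=np.int32)
#   header[0], header[1], header[2] = 20240520, 1, len(toks)
#   with open('test.bin', 'wb') as f:
#       f.write(header.tobytes()); f.write(toks.tobytes())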
class DistributedDataLoader:
def __init__(self, filename_pattern, T, process_rank, num_processes):
self.process_rank = process_rank
self.num_processes = num_processes
self.T = T
# glob files that match the pattern
self.files = sorted(glob.glob(filename_pattern))
assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"
# load and validate all data shards, count number of tokens in total
ntok_total = 0
for fname in self.files:
shard_ntok = _peek_data_shard(fname)
assert shard_ntok >= num_processes * T + 1
ntok_total += int(shard_ntok)
self.ntok_total = ntok_total
self.reset()
def reset(self):
self.current_shard = -1
self.advance()
def advance(self): # advance to next data shard
self.current_shard = (self.current_shard + 1) % len(self.files)
self.current_position = self.process_rank * self.T
self.tokens = _load_data_shard(self.files[self.current_shard])
def next_batch(self):
batch_size = self.T * self.num_processes
buf = self.tokens[self.current_position:self.current_position+self.T+1]
buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
x = buf[:-1] # inputs
y = buf[1:] # targets
# advance current position and load next shard if necessary
self.current_position += batch_size
if self.current_position + batch_size >= len(self.tokens):
self.advance()
return x.cuda(), y.cuda()
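# Sharding sketch: with P = num_processes, rank r starts at offset r*T, reads T+1 tokens
# (one extra for the shifted targets), then every rank advances by P*T, so the ranks sweep
# disjoint interleaved windows of each shard:
#
#   rank 0: [0, T], [P*T, P*T + T], ...
#   rank 1: [T, 2T], [(P+1)*T, (P+1)*T + T], ...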
# -----------------------------------------------------------------------------
# int main
@dataclass
class Hyperparameters:
# data hyperparams
input_bin : str = 'data/fineweb10B/fineweb_train_*.bin' # input .bin to train on
input_val_bin : str = 'data/fineweb10B/fineweb_val_*.bin' # input .bin to eval validation loss on
# optimization hyperparams
batch_size : int = 8 # batch size, in sequences, across all devices
sequence_length : int = 64*1024 # sequence length, in tokens
num_iterations : int = 1530 # number of iterations to run
warmup_iters : int = 0
cooldown_iters : int = 600 # number of iterations of linear warmup/cooldown for triangular or trapezoidal schedule
weight_decay : float = 0
# evaluation and logging hyperparams
val_loss_every : int = 125 # every how many steps to evaluate val loss? 0 for only at the end
val_tokens : int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
save_every : int = 0 # every how many steps to save the checkpoint? 0 for only at the end
args = Hyperparameters()
# set up DDP (distributed data parallel). torchrun sets these env variables
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.
# begin logging
logfile = None
if master_process:
run_id = str(uuid.uuid4())
logdir = 'logs/%s/' % run_id
os.makedirs(logdir, exist_ok=True)
logfile = 'logs/%s.txt' % run_id
# create the log file
with open(logfile, "w") as f:
# begin the log by printing this file (the Python code)
f.write(code)
f.write('='*100 + '\n')
def print0(s, logonly=False):
if master_process:
with open(logfile, "a") as f:
if not logonly:
print(s)
f.write(s+'\n')
# log information about the hardware/software environment this is running on
# and print the full `nvidia-smi` to file
print0(f"Running pytorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}\nnvidia-smi:")
import subprocess
result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
print0(f'{result.stdout}', logonly=True)
print0('='*100, logonly=True)
# convenience variables
T = args.sequence_length
# calculate the number of steps to take in the val loop.
assert args.val_tokens % (T * ddp_world_size) == 0
val_steps = args.val_tokens // (T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (ddp_world_size) == 0
train_accumulation_steps = args.batch_size // ddp_world_size
# load tokens
train_loader = DistributedDataLoader(args.input_bin, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, T, ddp_rank, ddp_world_size)
print0(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
print0(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
print0('='*100, logonly=True)
x, y = train_loader.next_batch()
# there are only 50257 unique GPT-2 tokens; we extend to the nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304
model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=12, n_head=6, n_embd=768))
model = model.cuda().bfloat16()
for m in model.modules():
if isinstance(m, CastedLinear):
m.float()
if hasattr(config, "coordinate_descent_tuning"):
config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
# init the optimizer(s)
optimizer1 = torch.optim.Adam([raw_model.transformer.wte.weight, raw_model.transformer.vte.weight], lr=0.6, betas=(0.8, 0.95), fused=True)
optimizer2 = torch.optim.Adam([raw_model.lm_head.weight], lr=0.008, betas=(0.8, 0.95), fused=True)
params = list(raw_model.transformer.h.parameters())
matrix_params = [p for p in params if p.ndim == 2]
scalar_params = [p for p in params if p.ndim < 2] + [raw_model.skip_weights]
optimizer3 = Muon(matrix_params, lr=0.05, momentum=0.95)
optimizer4 = torch.optim.Adam(scalar_params, lr=0.04, betas=(0.8, 0.95), fused=True) # note that this learning rate is neither sensitive nor tuned
optimizers = [optimizer1, optimizer2, optimizer3, optimizer4]
# learning rate decay scheduler (linear warmup and cooldown)
def get_lr(it):
assert it <= args.num_iterations
# 1) linear warmup for warmup_iters steps
if it < args.warmup_iters:
return (it+1) / args.warmup_iters
# 2) constant lr for a while
elif it < args.num_iterations - args.cooldown_iters:
return 1.0
# 3) linear cooldown
else:
decay_ratio = (args.num_iterations - it) / args.cooldown_iters
return decay_ratio
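# With warmup_iters=0, num_iterations=1530, cooldown_iters=600, this multiplier traces the
# trapezoidal schedule mentioned above (degenerate here: no warmup leg):
#   get_lr(0)    = 1.0   # no warmup phase
#   get_lr(929)  = 1.0   # constant phase, it < 1530 - 600
#   get_lr(1230) = 0.5   # linear cooldown: (1530 - 1230) / 600
#   get_lr(1530) = 0.0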
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]
# Start training loop
training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
for step in range(args.num_iterations + 1):
last_step = (step == args.num_iterations)
# This effectively ignores the timing of the first 10 steps, which are slower for weird reasons.
# Alternately, and slightly more correctly in terms of benchmarking, we could do 10
# steps with dummy data first, and then re-initialize the model and reset the loader.
if step == 10:
training_time_ms = 0
t0 = time.time()
timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val
# Set the attention blocksize for the current step, in chunks of 64. By @fernbear.bsky.social
attn_blocksize = torch.tensor(64*((step/args.num_iterations * (1792 - 64) + 64)//64), dtype=torch.int, device='cuda')
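    # e.g. step 0 -> 64, step 765 (halfway) -> 896, step 1530 -> 1792 tokens of attention window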
# once in a while evaluate the validation dataset
if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# run validation batches
model.eval()
val_loader.reset()
val_loss = 0.0
for _ in range(val_steps):
with torch.no_grad():
x_val, y_val = val_loader.next_batch()
val_loss += model(x_val, y_val, attn_blocksize=attn_blocksize)
dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
val_loss /= val_steps
# log val loss to console and to logfile
print0(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# save the state of the training process
log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
torch.save(log, 'logs/%s/state_step%06d.pt' % (run_id, step))
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
# bit confusing: we want to make sure to eval on 0th iteration
# but also after the very last iteration. so we loop for step <= num_iterations
# instead of just < num_iterations (one extra due to <=), only to do
# the validation/sampling one last time, and then we break right here as we're done.
if last_step:
break
# --------------- TRAINING SECTION BEGIN -----------------
model.train()
for i in range(1, train_accumulation_steps+1):
ctx = model.no_sync() if i < train_accumulation_steps else contextlib.nullcontext()
with ctx: # there's no need to sync gradients every accumulation step
# forward pass
loss = model(x, y, attn_blocksize=attn_blocksize)
# advance the dataset for the next batch
x, y = train_loader.next_batch()
# backward pass
loss.backward()
train_loss = loss.detach()
for p in model.parameters():
p.grad /= train_accumulation_steps
# momentum warmup for Muon
frac = min(step/300, 1)
optimizer3.param_groups[0]['momentum'] = (1 - frac) * 0.85 + frac * 0.95
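    # e.g. step 0 -> momentum 0.85, step 150 -> 0.90, step >= 300 -> 0.95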
# step the optimizers and schedulers
for opt, sched in zip(optimizers, schedulers):
opt.step()
sched.step()
# null the gradients
model.zero_grad(set_to_none=True)
# --------------- TRAINING SECTION END -------------------
# everything that follows now is just diagnostics, prints, logging, etc.
#dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
approx_time = training_time_ms + 1000 * (time.time() - t0)
print0(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")
if master_process:
print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")
# -------------------------------------------------------------------------
# clean up nicely
dist.destroy_process_group()
====================================================================================================
Running pytorch 2.6.0.dev20241203+cu124 compiled for CUDA 12.4
nvidia-smi:
Thu Dec 5 02:44:46 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA H100 80GB HBM3 On | 00000000:19:00.0 Off | 0 |
| N/A 38C P0 75W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA H100 80GB HBM3 On | 00000000:3B:00.0 Off | 0 |
| N/A 30C P0 99W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA H100 80GB HBM3 On | 00000000:4C:00.0 Off | 0 |
| N/A 30C P0 118W / 700W | 529MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 3 NVIDIA H100 80GB HBM3 On | 00000000:5D:00.0 Off | 0 |
| N/A 37C P0 118W / 700W | 529MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 4 NVIDIA H100 80GB HBM3 On | 00000000:9B:00.0 Off | 0 |
| N/A 38C P0 111W / 700W | 23MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 5 NVIDIA H100 80GB HBM3 On | 00000000:BB:00.0 Off | 0 |
| N/A 29C P0 110W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 6 NVIDIA H100 80GB HBM3 On | 00000000:CB:00.0 Off | 0 |
| N/A 38C P0 127W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 7 NVIDIA H100 80GB HBM3 On | 00000000:DB:00.0 Off | 0 |
| N/A 29C P0 118W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
+---------------------------------------------------------------------------------------+
====================================================================================================
Training DataLoader: total number of tokens: 1100000000 across 11 files
Validation DataLoader: total number of tokens: 100000000 across 1 files
====================================================================================================
step:0/1530 val_loss:10.8258 train_time:0ms step_avg:nanms
step:1/1530 train_loss:10.8258 train_time:31645ms step_avg:nanms
step:2/1530 train_loss:10.0800 train_time:31756ms step_avg:nanms
step:3/1530 train_loss:8.3835 train_time:31917ms step_avg:nanms
step:4/1530 train_loss:7.5695 train_time:32077ms step_avg:nanms
step:5/1530 train_loss:7.4778 train_time:32237ms step_avg:nanms
step:6/1530 train_loss:6.9739 train_time:32398ms step_avg:nanms
step:7/1530 train_loss:7.2121 train_time:32559ms step_avg:nanms
step:8/1530 train_loss:6.7230 train_time:32721ms step_avg:nanms
step:9/1530 train_loss:6.6272 train_time:32881ms step_avg:nanms
step:10/1530 train_loss:6.5475 train_time:33042ms step_avg:nanms
step:11/1530 train_loss:6.4750 train_time:115ms step_avg:nanms
step:12/1530 train_loss:6.3663 train_time:275ms step_avg:nanms
step:13/1530 train_loss:6.2313 train_time:435ms step_avg:145.07ms
step:14/1530 train_loss:6.2046 train_time:595ms step_avg:148.86ms
step:15/1530 train_loss:6.1490 train_time:756ms step_avg:151.30ms
step:16/1530 train_loss:6.1130 train_time:917ms step_avg:152.85ms
step:17/1530 train_loss:6.1672 train_time:1077ms step_avg:153.87ms
step:18/1530 train_loss:5.9912 train_time:1238ms step_avg:154.79ms
step:19/1530 train_loss:5.9822 train_time:1399ms step_avg:155.46ms
step:20/1530 train_loss:5.6826 train_time:1560ms step_avg:155.97ms
step:21/1530 train_loss:5.9410 train_time:1721ms step_avg:156.41ms
step:22/1530 train_loss:6.1715 train_time:1880ms step_avg:156.69ms
step:23/1530 train_loss:5.8489 train_time:2040ms step_avg:156.93ms
step:24/1530 train_loss:5.9971 train_time:2200ms step_avg:157.16ms
step:25/1530 train_loss:5.6823 train_time:2361ms step_avg:157.41ms
step:26/1530 train_loss:5.6008 train_time:2522ms step_avg:157.62ms
step:27/1530 train_loss:5.7466 train_time:2682ms step_avg:157.76ms
step:28/1530 train_loss:5.4171 train_time:2841ms step_avg:157.84ms
step:29/1530 train_loss:5.6728 train_time:3001ms step_avg:157.97ms
step:30/1530 train_loss:5.4645 train_time:3162ms step_avg:158.08ms
step:31/1530 train_loss:5.4292 train_time:3321ms step_avg:158.16ms
step:32/1530 train_loss:5.2919 train_time:3482ms step_avg:158.26ms
step:33/1530 train_loss:5.5634 train_time:3641ms step_avg:158.31ms
step:34/1530 train_loss:5.4925 train_time:3802ms step_avg:158.40ms
step:35/1530 train_loss:5.6081 train_time:3961ms step_avg:158.45ms
step:36/1530 train_loss:5.5460 train_time:4123ms step_avg:158.56ms
step:37/1530 train_loss:5.4602 train_time:4282ms step_avg:158.60ms
step:38/1530 train_loss:5.3154 train_time:4441ms step_avg:158.62ms
step:39/1530 train_loss:5.3141 train_time:4602ms step_avg:158.68ms
step:40/1530 train_loss:5.2359 train_time:4762ms step_avg:158.72ms
step:41/1530 train_loss:5.2215 train_time:4922ms step_avg:158.76ms
step:42/1530 train_loss:5.1639 train_time:5081ms step_avg:158.78ms
step:43/1530 train_loss:5.2620 train_time:5241ms step_avg:158.83ms
step:44/1530 train_loss:5.2436 train_time:5402ms step_avg:158.88ms
step:45/1530 train_loss:5.3821 train_time:5561ms step_avg:158.89ms
step:46/1530 train_loss:5.1786 train_time:5722ms step_avg:158.94ms
step:47/1530 train_loss:5.0650 train_time:5881ms step_avg:158.93ms
step:48/1530 train_loss:5.2039 train_time:6040ms step_avg:158.95ms
step:49/1530 train_loss:5.1383 train_time:6200ms step_avg:158.98ms
step:50/1530 train_loss:5.2500 train_time:6360ms step_avg:159.01ms
step:51/1530 train_loss:5.1424 train_time:6521ms step_avg:159.05ms
step:52/1530 train_loss:5.0292 train_time:6681ms step_avg:159.06ms
step:53/1530 train_loss:5.1651 train_time:6841ms step_avg:159.10ms
step:54/1530 train_loss:5.0070 train_time:7001ms step_avg:159.12ms
step:55/1530 train_loss:5.4237 train_time:7162ms step_avg:159.15ms
step:56/1530 train_loss:5.0287 train_time:7322ms step_avg:159.17ms
step:57/1530 train_loss:4.8666 train_time:7482ms step_avg:159.19ms
step:58/1530 train_loss:5.0395 train_time:7642ms step_avg:159.21ms
step:59/1530 train_loss:5.0119 train_time:7802ms step_avg:159.22ms
step:60/1530 train_loss:5.1381 train_time:7962ms step_avg:159.25ms
step:61/1530 train_loss:4.8431 train_time:8123ms step_avg:159.27ms
step:62/1530 train_loss:4.9924 train_time:8282ms step_avg:159.27ms
step:63/1530 train_loss:4.9994 train_time:8442ms step_avg:159.29ms
step:64/1530 train_loss:4.9571 train_time:8602ms step_avg:159.30ms
step:65/1530 train_loss:4.8012 train_time:8762ms step_avg:159.31ms
step:66/1530 train_loss:4.9091 train_time:8921ms step_avg:159.31ms
step:67/1530 train_loss:4.8213 train_time:9082ms step_avg:159.33ms
step:68/1530 train_loss:5.0808 train_time:9242ms step_avg:159.35ms
step:69/1530 train_loss:4.7074 train_time:9402ms step_avg:159.36ms
step:70/1530 train_loss:4.8324 train_time:9563ms step_avg:159.38ms
step:71/1530 train_loss:4.9747 train_time:9722ms step_avg:159.38ms
step:72/1530 train_loss:4.8903 train_time:9882ms step_avg:159.39ms
step:73/1530 train_loss:4.7743 train_time:10042ms step_avg:159.39ms
step:74/1530 train_loss:4.9081 train_time:10202ms step_avg:159.40ms
step:75/1530 train_loss:4.8747 train_time:10362ms step_avg:159.41ms
step:76/1530 train_loss:4.7962 train_time:10522ms step_avg:159.42ms
step:77/1530 train_loss:4.9126 train_time:10682ms step_avg:159.43ms
step:78/1530 train_loss:5.1353 train_time:10841ms step_avg:159.43ms
step:79/1530 train_loss:4.8310 train_time:11002ms step_avg:159.44ms
step:80/1530 train_loss:4.8635 train_time:11161ms step_avg:159.44ms
step:81/1530 train_loss:4.6689 train_time:11323ms step_avg:159.47ms
step:82/1530 train_loss:4.8199 train_time:11483ms step_avg:159.48ms
step:83/1530 train_loss:4.7765 train_time:11642ms step_avg:159.48ms
step:84/1530 train_loss:4.7748 train_time:11802ms step_avg:159.49ms
step:85/1530 train_loss:4.6255 train_time:11962ms step_avg:159.50ms
step:86/1530 train_loss:4.8451 train_time:12122ms step_avg:159.50ms
step:87/1530 train_loss:4.7491 train_time:12281ms step_avg:159.49ms
step:88/1530 train_loss:4.7398 train_time:12441ms step_avg:159.51ms
step:89/1530 train_loss:4.6918 train_time:12603ms step_avg:159.53ms
step:90/1530 train_loss:4.6446 train_time:12762ms step_avg:159.53ms
step:91/1530 train_loss:4.6256 train_time:12923ms step_avg:159.54ms
step:92/1530 train_loss:4.7947 train_time:13083ms step_avg:159.54ms
step:93/1530 train_loss:4.6251 train_time:13242ms step_avg:159.55ms
step:94/1530 train_loss:4.6453 train_time:13402ms step_avg:159.54ms
step:95/1530 train_loss:4.6861 train_time:13562ms step_avg:159.56ms
step:96/1530 train_loss:4.5841 train_time:13723ms step_avg:159.57ms
step:97/1530 train_loss:4.6397 train_time:13883ms step_avg:159.57ms
step:98/1530 train_loss:4.5807 train_time:14042ms step_avg:159.57ms
step:99/1530 train_loss:4.6579 train_time:14202ms step_avg:159.58ms
step:100/1530 train_loss:4.6853 train_time:14363ms step_avg:159.58ms
step:101/1530 train_loss:4.5651 train_time:14522ms step_avg:159.58ms
step:102/1530 train_loss:4.7044 train_time:14682ms step_avg:159.58ms
step:103/1530 train_loss:4.5694 train_time:14841ms step_avg:159.58ms
step:104/1530 train_loss:4.5349 train_time:15001ms step_avg:159.59ms
step:105/1530 train_loss:4.5631 train_time:15161ms step_avg:159.59ms
step:106/1530 train_loss:4.6151 train_time:15322ms step_avg:159.61ms
step:107/1530 train_loss:4.4993 train_time:15482ms step_avg:159.61ms
step:108/1530 train_loss:4.3534 train_time:15642ms step_avg:159.61ms
step:109/1530 train_loss:4.4740 train_time:15802ms step_avg:159.62ms
step:110/1530 train_loss:4.4841 train_time:15962ms step_avg:159.62ms
step:111/1530 train_loss:4.4243 train_time:16122ms step_avg:159.63ms
step:112/1530 train_loss:4.5873 train_time:16282ms step_avg:159.63ms
step:113/1530 train_loss:4.4816 train_time:16442ms step_avg:159.63ms
step:114/1530 train_loss:4.3552 train_time:16602ms step_avg:159.63ms
step:115/1530 train_loss:4.5023 train_time:16764ms step_avg:159.65ms
step:116/1530 train_loss:4.4686 train_time:16929ms step_avg:159.71ms
step:117/1530 train_loss:4.3567 train_time:17094ms step_avg:159.76ms
step:118/1530 train_loss:4.5759 train_time:17258ms step_avg:159.80ms
step:119/1530 train_loss:4.4425 train_time:17422ms step_avg:159.83ms
step:120/1530 train_loss:4.3298 train_time:17586ms step_avg:159.88ms
step:121/1530 train_loss:4.2903 train_time:17749ms step_avg:159.90ms
step:122/1530 train_loss:4.4378 train_time:17915ms step_avg:159.95ms
step:123/1530 train_loss:4.2858 train_time:18078ms step_avg:159.98ms
step:124/1530 train_loss:4.5841 train_time:18242ms step_avg:160.02ms
step:125/1530 train_loss:4.4448 train_time:18405ms step_avg:160.05ms
step:125/1530 val_loss:4.3968 train_time:18452ms step_avg:160.45ms
step:126/1530 train_loss:4.4079 train_time:18572ms step_avg:160.10ms
step:127/1530 train_loss:4.4206 train_time:18738ms step_avg:160.15ms
step:128/1530 train_loss:4.3705 train_time:18901ms step_avg:160.18ms
step:129/1530 train_loss:4.6758 train_time:19066ms step_avg:160.22ms
step:130/1530 train_loss:4.3569 train_time:19231ms step_avg:160.26ms
step:131/1530 train_loss:4.3875 train_time:19394ms step_avg:160.28ms
step:132/1530 train_loss:4.3259 train_time:19558ms step_avg:160.31ms
step:133/1530 train_loss:4.4397 train_time:19722ms step_avg:160.34ms
step:134/1530 train_loss:4.2560 train_time:19885ms step_avg:160.37ms
step:135/1530 train_loss:4.4341 train_time:20050ms step_avg:160.40ms
step:136/1530 train_loss:4.1989 train_time:20214ms step_avg:160.43ms
step:137/1530 train_loss:4.3632 train_time:20378ms step_avg:160.46ms
step:138/1530 train_loss:4.2721 train_time:20543ms step_avg:160.49ms
step:139/1530 train_loss:4.3646 train_time:20707ms step_avg:160.52ms
step:140/1530 train_loss:4.4780 train_time:20871ms step_avg:160.55ms
step:141/1530 train_loss:4.3096 train_time:21035ms step_avg:160.57ms
step:142/1530 train_loss:4.2962 train_time:21199ms step_avg:160.60ms
step:143/1530 train_loss:4.2402 train_time:21363ms step_avg:160.63ms
step:144/1530 train_loss:4.3336 train_time:21527ms step_avg:160.65ms
step:145/1530 train_loss:4.2913 train_time:21691ms step_avg:160.68ms
step:146/1530 train_loss:4.1595 train_time:21854ms step_avg:160.69ms
step:147/1530 train_loss:4.3216 train_time:22018ms step_avg:160.71ms
step:148/1530 train_loss:4.3558 train_time:22180ms step_avg:160.73ms
step:149/1530 train_loss:4.2893 train_time:22343ms step_avg:160.74ms
step:150/1530 train_loss:4.4220 train_time:22508ms step_avg:160.77ms
step:151/1530 train_loss:4.2596 train_time:22671ms step_avg:160.79ms
step:152/1530 train_loss:4.2744 train_time:22835ms step_avg:160.81ms
step:153/1530 train_loss:4.3613 train_time:22999ms step_avg:160.83ms
step:154/1530 train_loss:4.3552 train_time:23162ms step_avg:160.85ms
step:155/1530 train_loss:4.2557 train_time:23327ms step_avg:160.87ms
step:156/1530 train_loss:4.3511 train_time:23490ms step_avg:160.89ms
step:157/1530 train_loss:4.4002 train_time:23653ms step_avg:160.91ms
step:158/1530 train_loss:4.2362 train_time:23817ms step_avg:160.92ms
step:159/1530 train_loss:4.2959 train_time:23980ms step_avg:160.94ms
step:160/1530 train_loss:4.1186 train_time:24143ms step_avg:160.95ms
step:161/1530 train_loss:4.3441 train_time:24308ms step_avg:160.98ms
step:162/1530 train_loss:4.3477 train_time:24472ms step_avg:161.00ms
step:163/1530 train_loss:4.3281 train_time:24635ms step_avg:161.01ms
step:164/1530 train_loss:4.1888 train_time:24799ms step_avg:161.03ms
step:165/1530 train_loss:4.2722 train_time:24963ms step_avg:161.05ms
step:166/1530 train_loss:4.3268 train_time:25126ms step_avg:161.07ms
step:167/1530 train_loss:4.1820 train_time:25291ms step_avg:161.09ms
step:168/1530 train_loss:4.2747 train_time:25454ms step_avg:161.10ms
step:169/1530 train_loss:4.1555 train_time:25617ms step_avg:161.12ms
step:170/1530 train_loss:4.0198 train_time:25781ms step_avg:161.13ms
step:171/1530 train_loss:4.1938 train_time:25945ms step_avg:161.15ms
step:172/1530 train_loss:4.2006 train_time:26109ms step_avg:161.17ms
step:173/1530 train_loss:4.2522 train_time:26272ms step_avg:161.18ms
step:174/1530 train_loss:4.3978 train_time:26435ms step_avg:161.19ms
step:175/1530 train_loss:4.2338 train_time:26597ms step_avg:161.19ms
step:176/1530 train_loss:4.0925 train_time:26760ms step_avg:161.20ms
step:177/1530 train_loss:4.0611 train_time:26924ms step_avg:161.22ms
step:178/1530 train_loss:4.1738 train_time:27087ms step_avg:161.23ms
step:179/1530 train_loss:4.1102 train_time:27249ms step_avg:161.24ms
step:180/1530 train_loss:4.1040 train_time:27412ms step_avg:161.25ms
step:181/1530 train_loss:4.2895 train_time:27575ms step_avg:161.26ms
step:182/1530 train_loss:4.1561 train_time:27737ms step_avg:161.26ms
step:183/1530 train_loss:4.1173 train_time:27900ms step_avg:161.27ms
step:184/1530 train_loss:4.1136 train_time:28062ms step_avg:161.28ms
step:185/1530 train_loss:4.1957 train_time:28224ms step_avg:161.28ms
step:186/1530 train_loss:4.1722 train_time:28388ms step_avg:161.29ms
step:187/1530 train_loss:4.2221 train_time:28550ms step_avg:161.30ms
step:188/1530 train_loss:4.1572 train_time:28849ms step_avg:162.07ms
step:189/1530 train_loss:4.1000 train_time:29177ms step_avg:163.00ms
step:190/1530 train_loss:4.2003 train_time:29339ms step_avg:162.99ms
step:191/1530 train_loss:4.0708 train_time:29503ms step_avg:163.00ms
step:192/1530 train_loss:4.0313 train_time:29666ms step_avg:163.00ms
step:193/1530 train_loss:4.2357 train_time:29829ms step_avg:163.00ms
step:194/1530 train_loss:4.1678 train_time:29992ms step_avg:163.00ms
step:195/1530 train_loss:4.3540 train_time:30154ms step_avg:162.99ms
step:196/1530 train_loss:4.1657 train_time:30316ms step_avg:162.99ms
step:197/1530 train_loss:4.0382 train_time:30479ms step_avg:162.99ms
step:198/1530 train_loss:4.1748 train_time:30642ms step_avg:162.99ms
step:199/1530 train_loss:4.0300 train_time:30804ms step_avg:162.98ms
step:200/1530 train_loss:4.0991 train_time:30968ms step_avg:162.99ms
step:201/1530 train_loss:4.0020 train_time:31131ms step_avg:162.99ms
step:202/1530 train_loss:4.2443 train_time:31293ms step_avg:162.99ms
step:203/1530 train_loss:4.0543 train_time:31456ms step_avg:162.98ms
step:204/1530 train_loss:4.1803 train_time:31618ms step_avg:162.98ms
step:205/1530 train_loss:4.2363 train_time:31780ms step_avg:162.97ms
step:206/1530 train_loss:3.9424 train_time:31943ms step_avg:162.97ms
step:207/1530 train_loss:4.0745 train_time:32106ms step_avg:162.97ms
step:208/1530 train_loss:4.0908 train_time:32268ms step_avg:162.97ms
step:209/1530 train_loss:4.2306 train_time:32431ms step_avg:162.97ms
step:210/1530 train_loss:4.1734 train_time:32594ms step_avg:162.97ms
step:211/1530 train_loss:4.0460 train_time:32755ms step_avg:162.96ms
step:212/1530 train_loss:4.1046 train_time:32918ms step_avg:162.96ms
step:213/1530 train_loss:4.0397 train_time:33081ms step_avg:162.96ms
step:214/1530 train_loss:4.1054 train_time:33243ms step_avg:162.96ms
step:215/1530 train_loss:3.9377 train_time:33407ms step_avg:162.96ms
step:216/1530 train_loss:3.9916 train_time:33569ms step_avg:162.96ms
step:217/1530 train_loss:3.9899 train_time:33732ms step_avg:162.96ms
step:218/1530 train_loss:4.0675 train_time:33894ms step_avg:162.95ms
step:219/1530 train_loss:4.0664 train_time:34057ms step_avg:162.95ms
step:220/1530 train_loss:4.0740 train_time:34218ms step_avg:162.94ms
step:221/1530 train_loss:4.0880 train_time:34382ms step_avg:162.95ms
step:222/1530 train_loss:3.9908 train_time:34545ms step_avg:162.95ms
step:223/1530 train_loss:3.9891 train_time:34710ms step_avg:162.96ms
step:224/1530 train_loss:4.2915 train_time:34873ms step_avg:162.96ms
step:225/1530 train_loss:3.9227 train_time:35036ms step_avg:162.96ms
step:226/1530 train_loss:3.9830 train_time:35198ms step_avg:162.95ms
step:227/1530 train_loss:3.9681 train_time:35361ms step_avg:162.96ms
step:228/1530 train_loss:4.1316 train_time:35526ms step_avg:162.96ms
step:229/1530 train_loss:3.9207 train_time:35693ms step_avg:162.98ms
step:230/1530 train_loss:4.0281 train_time:35858ms step_avg:162.99ms
step:231/1530 train_loss:3.8991 train_time:36024ms step_avg:163.00ms
step:232/1530 train_loss:3.9694 train_time:36190ms step_avg:163.02ms
step:233/1530 train_loss:4.0808 train_time:36355ms step_avg:163.03ms
step:234/1530 train_loss:4.0170 train_time:36520ms step_avg:163.04ms
step:235/1530 train_loss:3.8869 train_time:36688ms step_avg:163.06ms
step:236/1530 train_loss:4.0783 train_time:36853ms step_avg:163.07ms
step:237/1530 train_loss:4.0670 train_time:37019ms step_avg:163.08ms
step:238/1530 train_loss:3.9294 train_time:37186ms step_avg:163.10ms
step:239/1530 train_loss:4.0724 train_time:37352ms step_avg:163.11ms
step:240/1530 train_loss:4.1112 train_time:37517ms step_avg:163.12ms
step:241/1530 train_loss:3.9535 train_time:37683ms step_avg:163.13ms
step:242/1530 train_loss:4.1347 train_time:37850ms step_avg:163.15ms
step:243/1530 train_loss:4.0041 train_time:38015ms step_avg:163.15ms
step:244/1530 train_loss:4.0752 train_time:38181ms step_avg:163.17ms
step:245/1530 train_loss:4.1313 train_time:38347ms step_avg:163.18ms
step:246/1530 train_loss:4.0506 train_time:38513ms step_avg:163.19ms
step:247/1530 train_loss:4.0021 train_time:38679ms step_avg:163.20ms
step:248/1530 train_loss:4.0932 train_time:38845ms step_avg:163.21ms
step:249/1530 train_loss:3.9064 train_time:39011ms step_avg:163.22ms
step:250/1530 train_loss:3.9613 train_time:39176ms step_avg:163.23ms
step:250/1530 val_loss:3.9934 train_time:39224ms step_avg:163.43ms
step:251/1530 train_loss:4.0637 train_time:39345ms step_avg:163.26ms
step:252/1530 train_loss:4.1536 train_time:39511ms step_avg:163.27ms
step:253/1530 train_loss:3.9251 train_time:39677ms step_avg:163.28ms
step:254/1530 train_loss:3.8710 train_time:39844ms step_avg:163.29ms
step:255/1530 train_loss:4.0671 train_time:40009ms step_avg:163.30ms
step:256/1530 train_loss:3.9675 train_time:40174ms step_avg:163.31ms
step:257/1530 train_loss:3.9813 train_time:40341ms step_avg:163.32ms
step:258/1530 train_loss:3.9817 train_time:40506ms step_avg:163.33ms
step:259/1530 train_loss:4.0189 train_time:40673ms step_avg:163.34ms
step:260/1530 train_loss:4.0475 train_time:40840ms step_avg:163.36ms
step:261/1530 train_loss:4.0190 train_time:41005ms step_avg:163.37ms
step:262/1530 train_loss:3.9852 train_time:41171ms step_avg:163.38ms
step:263/1530 train_loss:3.8798 train_time:41338ms step_avg:163.39ms
step:264/1530 train_loss:3.9769 train_time:41504ms step_avg:163.40ms
step:265/1530 train_loss:3.8678 train_time:41670ms step_avg:163.41ms
step:266/1530 train_loss:3.9207 train_time:41836ms step_avg:163.42ms
step:267/1530 train_loss:3.9311 train_time:42002ms step_avg:163.43ms
step:268/1530 train_loss:3.9508 train_time:42168ms step_avg:163.44ms
step:269/1530 train_loss:3.8485 train_time:42335ms step_avg:163.45ms
step:270/1530 train_loss:4.0977 train_time:42502ms step_avg:163.47ms
step:271/1530 train_loss:3.9640 train_time:42668ms step_avg:163.48ms
step:272/1530 train_loss:3.9206 train_time:42835ms step_avg:163.49ms
step:273/1530 train_loss:3.9306 train_time:43000ms step_avg:163.50ms
step:274/1530 train_loss:4.0395 train_time:43166ms step_avg:163.51ms
step:275/1530 train_loss:4.0521 train_time:43332ms step_avg:163.52ms
step:276/1530 train_loss:4.2215 train_time:43498ms step_avg:163.53ms
step:277/1530 train_loss:4.0280 train_time:43665ms step_avg:163.54ms
step:278/1530 train_loss:4.0764 train_time:43830ms step_avg:163.54ms
step:279/1530 train_loss:3.9878 train_time:43996ms step_avg:163.55ms
step:280/1530 train_loss:4.1644 train_time:44164ms step_avg:163.57ms
step:281/1530 train_loss:3.9721 train_time:44329ms step_avg:163.58ms
step:282/1530 train_loss:3.9332 train_time:44496ms step_avg:163.59ms
step:283/1530 train_loss:3.8999 train_time:44663ms step_avg:163.60ms
step:284/1530 train_loss:4.0386 train_time:44829ms step_avg:163.61ms
step:285/1530 train_loss:4.0504 train_time:44995ms step_avg:163.62ms
step:286/1530 train_loss:4.0837 train_time:45163ms step_avg:163.63ms
step:287/1530 train_loss:3.8944 train_time:45329ms step_avg:163.64ms
step:288/1530 train_loss:4.0028 train_time:45494ms step_avg:163.65ms
step:289/1530 train_loss:3.8570 train_time:45659ms step_avg:163.65ms
step:290/1530 train_loss:3.8485 train_time:45824ms step_avg:163.66ms
step:291/1530 train_loss:3.9049 train_time:45991ms step_avg:163.67ms
step:292/1530 train_loss:3.8553 train_time:46156ms step_avg:163.67ms
step:293/1530 train_loss:3.8899 train_time:46322ms step_avg:163.68ms
step:294/1530 train_loss:3.9334 train_time:46487ms step_avg:163.69ms
step:295/1530 train_loss:3.8304 train_time:46652ms step_avg:163.69ms
step:296/1530 train_loss:3.8485 train_time:46817ms step_avg:163.70ms
step:297/1530 train_loss:3.8549 train_time:46982ms step_avg:163.70ms
step:298/1530 train_loss:3.9626 train_time:47147ms step_avg:163.70ms
step:299/1530 train_loss:3.8173 train_time:47311ms step_avg:163.71ms
step:300/1530 train_loss:3.9488 train_time:47475ms step_avg:163.71ms
step:301/1530 train_loss:3.9513 train_time:47641ms step_avg:163.71ms
step:302/1530 train_loss:3.9262 train_time:47806ms step_avg:163.72ms
step:303/1530 train_loss:3.9674 train_time:47970ms step_avg:163.72ms
step:304/1530 train_loss:3.9603 train_time:48136ms step_avg:163.73ms
step:305/1530 train_loss:4.4445 train_time:48302ms step_avg:163.73ms
step:306/1530 train_loss:3.9304 train_time:48467ms step_avg:163.74ms
step:307/1530 train_loss:3.8263 train_time:48631ms step_avg:163.74ms
step:308/1530 train_loss:3.9630 train_time:48797ms step_avg:163.75ms
step:309/1530 train_loss:3.8670 train_time:48963ms step_avg:163.76ms
step:310/1530 train_loss:4.0812 train_time:49128ms step_avg:163.76ms
step:311/1530 train_loss:3.9197 train_time:49292ms step_avg:163.76ms
step:312/1530 train_loss:3.8589 train_time:49458ms step_avg:163.77ms
step:313/1530 train_loss:3.9283 train_time:49624ms step_avg:163.78ms
step:314/1530 train_loss:4.0552 train_time:49789ms step_avg:163.78ms
step:315/1530 train_loss:3.9269 train_time:49954ms step_avg:163.79ms
step:316/1530 train_loss:3.7859 train_time:50120ms step_avg:163.79ms
step:317/1530 train_loss:3.8620 train_time:50285ms step_avg:163.80ms
step:318/1530 train_loss:3.9179 train_time:50450ms step_avg:163.80ms
step:319/1530 train_loss:3.8844 train_time:50615ms step_avg:163.80ms
step:320/1530 train_loss:4.0052 train_time:50781ms step_avg:163.81ms
step:321/1530 train_loss:3.9534 train_time:50946ms step_avg:163.81ms
step:322/1530 train_loss:3.9300 train_time:51110ms step_avg:163.81ms
step:323/1530 train_loss:3.9948 train_time:51276ms step_avg:163.82ms
step:324/1530 train_loss:3.9298 train_time:51442ms step_avg:163.83ms
step:325/1530 train_loss:4.0031 train_time:51606ms step_avg:163.83ms
step:326/1530 train_loss:3.8882 train_time:51773ms step_avg:163.84ms
step:327/1530 train_loss:4.3810 train_time:51938ms step_avg:163.84ms
step:328/1530 train_loss:4.0616 train_time:52103ms step_avg:163.85ms
step:329/1530 train_loss:3.7813 train_time:52268ms step_avg:163.85ms
step:330/1530 train_loss:3.7322 train_time:52434ms step_avg:163.86ms
step:331/1530 train_loss:3.9683 train_time:52600ms step_avg:163.86ms
step:332/1530 train_loss:3.8957 train_time:52765ms step_avg:163.87ms
step:333/1530 train_loss:3.8762 train_time:52929ms step_avg:163.87ms
step:334/1530 train_loss:3.8277 train_time:53094ms step_avg:163.87ms
step:335/1530 train_loss:4.0012 train_time:53261ms step_avg:163.88ms
step:336/1530 train_loss:3.9499 train_time:53425ms step_avg:163.88ms
step:337/1530 train_loss:4.4172 train_time:53591ms step_avg:163.89ms
step:338/1530 train_loss:3.9372 train_time:53757ms step_avg:163.89ms
step:339/1530 train_loss:3.8547 train_time:53923ms step_avg:163.90ms
step:340/1530 train_loss:3.9246 train_time:54088ms step_avg:163.90ms
step:341/1530 train_loss:3.8456 train_time:54255ms step_avg:163.91ms
step:342/1530 train_loss:3.8043 train_time:54423ms step_avg:163.92ms
step:343/1530 train_loss:3.8276 train_time:54591ms step_avg:163.94ms
step:344/1530 train_loss:3.9817 train_time:54759ms step_avg:163.95ms
step:345/1530 train_loss:3.8029 train_time:54928ms step_avg:163.97ms
step:346/1530 train_loss:3.7572 train_time:55096ms step_avg:163.98ms
step:347/1530 train_loss:3.7854 train_time:55266ms step_avg:163.99ms
step:348/1530 train_loss:3.8478 train_time:55432ms step_avg:164.00ms
step:349/1530 train_loss:3.8215 train_time:55602ms step_avg:164.02ms
step:350/1530 train_loss:3.5554 train_time:55771ms step_avg:164.03ms
step:351/1530 train_loss:3.8145 train_time:55941ms step_avg:164.05ms
step:352/1530 train_loss:4.1772 train_time:56108ms step_avg:164.06ms
step:353/1530 train_loss:3.6521 train_time:56275ms step_avg:164.07ms
step:354/1530 train_loss:3.9194 train_time:56443ms step_avg:164.08ms
step:355/1530 train_loss:3.7775 train_time:56610ms step_avg:164.09ms
step:356/1530 train_loss:3.8748 train_time:56779ms step_avg:164.10ms
step:357/1530 train_loss:3.7480 train_time:56948ms step_avg:164.11ms
step:358/1530 train_loss:3.8533 train_time:57116ms step_avg:164.13ms
step:359/1530 train_loss:3.7614 train_time:57285ms step_avg:164.14ms
step:360/1530 train_loss:3.4165 train_time:57455ms step_avg:164.16ms
step:361/1530 train_loss:4.0082 train_time:57623ms step_avg:164.17ms
step:362/1530 train_loss:3.9115 train_time:57791ms step_avg:164.18ms
step:363/1530 train_loss:3.8398 train_time:57959ms step_avg:164.19ms
step:364/1530 train_loss:3.7383 train_time:58128ms step_avg:164.20ms
step:365/1530 train_loss:3.9013 train_time:58296ms step_avg:164.21ms
step:366/1530 train_loss:3.8502 train_time:58465ms step_avg:164.23ms
step:367/1530 train_loss:3.8493 train_time:58632ms step_avg:164.23ms
step:368/1530 train_loss:3.8454 train_time:58800ms step_avg:164.24ms
step:369/1530 train_loss:3.7364 train_time:58968ms step_avg:164.26ms
step:370/1530 train_loss:3.8728 train_time:59135ms step_avg:164.26ms
step:371/1530 train_loss:3.7223 train_time:59303ms step_avg:164.28ms
step:372/1530 train_loss:3.6845 train_time:59472ms step_avg:164.29ms
step:373/1530 train_loss:3.8982 train_time:59640ms step_avg:164.30ms
step:374/1530 train_loss:3.8177 train_time:59807ms step_avg:164.31ms
step:375/1530 train_loss:3.7951 train_time:59975ms step_avg:164.32ms
step:375/1530 val_loss:3.8219 train_time:60024ms step_avg:164.45ms