# model.py
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from tensorlayer.layers import *
from config import sp_config, config, log_config
def UNet_down(patch, num_features_out, is_train=False, reuse = False, scope = 'unet_down'):
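    """Encoder half of a 2-D U-Net.

    Five conv blocks (32/64/128/128/256 filters; symmetric-padded 3x3 convs,
    each followed by BatchNorm + leaky ReLU) separated by 2x2 max-pooling.
    Returns the raw tensors of the pre-pool activations d0..d3 plus the
    bottleneck d4, which UNet_up consumes as skip connections.
    `num_features_out` sets the channel count of the last bottleneck conv.
    """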
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse = reuse) as vs:
""" input layer """
net_in = InputLayer(patch, name='input')
""" conv1 """
network = PadLayer(net_in, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_1')
network = Conv2d(network, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_1')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_2')
network = Conv2d(network, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_2')
d0 = network
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
""" conv2 """
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_1')
network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_1')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_2')
network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_2')
d1 = network
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
""" conv3 """
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_1')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_1')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_2')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_2')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_3')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_3')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_4')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_4')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_4')
d2 = network
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
""" conv4 """
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_1')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_1')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_2')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_2')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_3')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_3')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_4')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_4')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_4')
d3 = network
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
""" conv5 """
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad5_1')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv5_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn5_1')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad5_2')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv5_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn5_2')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad5_3')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv5_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn5_3')
network = PadLayer(network, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad5_4')
network = Conv2d(network, n_filter=num_features_out, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv5_4')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn5_4')
d4 = network
        return d0.outputs, d1.outputs, d2.outputs, d3.outputs, d4.outputs
def UNet_up(feats, is_train=False, reuse=False, scope = 'unet_up'):
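    """Decoder half of the 2-D U-Net.

    `feats` is expected to be the (d0, d1, d2, d3, d4) tuple returned by
    UNet_down. Each stage upsamples 2x (method=1, i.e. nearest-neighbour in
    tf.image.resize_images terms), concatenates the matching skip feature,
    and applies three symmetric-padded 3x3 conv+BN blocks (256/128/64); the
    uf head reduces to 3 channels. Note the output still passes through
    BatchNorm + leaky ReLU, so it is not a raw image prediction.
    """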
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse=reuse) as vs:
d0 = InputLayer(feats[0], name='d0')
d1 = InputLayer(feats[1], name='d1')
d2 = InputLayer(feats[2], name='d2')
d3 = InputLayer(feats[3], name='d3')
d4 = InputLayer(feats[4], name='d4')
n = UpSampling2dLayer(d4, (2, 2), is_scale = True, method = 1, align_corners=True, name='u3/u')
n = ConcatLayer([n, d3], concat_dim = 3, name='u3/concat')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad3')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u2/u')
n = ConcatLayer([n, d2], concat_dim = 3, name='u2/concat')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad1')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b1')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad2')  # was 'u2/pad3', duplicating the pad below
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad3')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u1/u')
n = ConcatLayer([n, d1], concat_dim = 3, name='u1/concat')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad1')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad2')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad3')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u0/u')
n = ConcatLayer([n, d0], concat_dim = 3, name='u0/concat')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u0/pad_init')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u0/c_init')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u0/b_init')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad1')#
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c1')#
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b1')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad2')#
n = Conv2d(n, 32, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c2')#
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b2')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad3')#pad1
n = Conv2d(n, 32, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c3')#c1
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b3')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad4')#pad1
n = Conv2d(n, 3, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c4')#c1
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b4')#
return n.outputs
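
# Hedged usage sketch (not from this file): UNet_down and UNet_up presumably
# pair as below. The placeholder shape is an assumption; H and W must be
# divisible by 16 so the four pooling/upsampling stages line up.
#
#   patch = tf.placeholder(tf.float32, [None, 352, 352, 3])
#   d0, d1, d2, d3, d4 = UNet_down(patch, num_features_out=256, is_train=True)
#   restored = UNet_up([d0, d1, d2, d3, d4], is_train=True)
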
def Localizer(feats, out_param_dim, is_train=False, reuse = False, is_tanh = False, scope = 'Localizer'):
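    """Small CNN regressor mapping a feature map to `out_param_dim` values.

    Five unpadded 3x3 convs (the first strided 2x2), flatten, a 256-unit
    dense layer, then a linear head, or a tanh-bounded one when `is_tanh`
    is set (presumably for normalized transform parameters).
    """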
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse = reuse) as vs:
n = InputLayer(feats, name='input')
n = Conv2d(n, 32, (3, 3), (2, 2), act=None, padding='VALID', W_init=w_init_relu, name='d1/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='d1/b1')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='d2/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='d2/b1')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='d3/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='d3/b1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='d4/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='d4/b1')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='d5/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='d5/b1')
n = FlattenLayer(n, name='df/flatten1')
n = DenseLayer(n, n_units = 256, act = tf.identity, name='df/dense1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='df/b1')
if is_tanh:
n = DenseLayer(n, n_units = out_param_dim, act = tf.nn.tanh, name='df/dense2')
else:
n = DenseLayer(n, n_units = out_param_dim, act = tf.identity, name='df/dense2')
return n.outputs
def UNet_down_3D_reshape(patch, is_train=False, reuse = False, scope = 'unet_down'):
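    """3-D variant of the U-Net encoder that folds depth into channels.

    Four Conv3d blocks (32/64/128/256 filters) with 2x2x2 max-pooling; the
    result is reshaped to [-1, 22, 22, 512], folding the remaining depth
    into the channel axis. The hard-coded shape assumes one fixed input
    volume size, so any other input shape will fail at the reshape.
    """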
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse = reuse) as vs:
""" input layer """
net_in = InputLayer(patch, name='input')
""" conv1 """
network = Conv3dLayer(net_in, shape=(3, 3, 3, 3, 32), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv1_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 32, 32), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv1_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_2')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool1')
print(network)
""" conv2 """
network = Conv3dLayer(network, shape=(3, 3, 3, 32, 64), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv2_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 64, 64), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv2_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_2')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool2')
print(network)
""" conv3 """
network = Conv3dLayer(network, shape=(3, 3, 3, 64, 128), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv3_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 128, 128), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv3_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_2')
network = Conv3dLayer(network, shape=(3, 3, 3, 128, 128), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv3_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_3')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool3')
print(network)
""" conv4 """
network = Conv3dLayer(network, shape=(3, 3, 3, 128, 256), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv4_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 256, 256), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv4_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_2')
network = Conv3dLayer(network, shape=(3, 3, 3, 256, 256), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv4_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_3')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool4')
print(network)
return tf.reshape(network.outputs,[-1,22,22,512])
def UNet_down_3D(patch, is_train=False, reuse = False, scope = 'unet_down'):
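    """Same 3-D encoder as UNet_down_3D_reshape, but the depth axis is
    squeezed instead of reshaped, so the input depth must collapse to
    exactly 1 after the four rounds of 2x2x2 pooling.
    """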
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse = reuse) as vs:
""" input layer """
net_in = InputLayer(patch, name='input')
""" conv1 """
network = Conv3dLayer(net_in, shape=(3, 3, 3, 3, 32), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv1_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 32, 32), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv1_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_2')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool1')
#print(network)
""" conv2 """
network = Conv3dLayer(network, shape=(3, 3, 3, 32, 64), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv2_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 64, 64), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv2_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_2')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool2')
#print(network)
""" conv3 """
network = Conv3dLayer(network, shape=(3, 3, 3, 64, 128), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv3_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 128, 128), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv3_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_2')
network = Conv3dLayer(network, shape=(3, 3, 3, 128, 128), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv3_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_3')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool3')
#print(network)
""" conv4 """
network = Conv3dLayer(network, shape=(3, 3, 3, 128, 256), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv4_1')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_1')
network = Conv3dLayer(network, shape=(3, 3, 3, 256, 256), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv4_2')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_2')
network = Conv3dLayer(network, shape=(3, 3, 3, 256, 256), strides=(1,1,1,1,1), act=None, W_init=w_init_relu, padding='SAME', name='conv4_3')
network = BatchNormLayer(network, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_3')
network = MaxPool3d(network, filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME', name='pool4')
#print(network)
return tf.squeeze(network.outputs,[1])
def UNet_up_merged(feats, is_train=False, reuse=False, scope = 'unet_up'):
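    """Decoder that starts from a single merged feature map instead of
    per-level skips: three 512-filter convs at the bottleneck (u4), then the
    same upsample + three-conv ladder as UNet_up (256/128/64) down to a
    3-channel, BatchNorm-activated output.
    """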
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse=reuse) as vs:
net_in = InputLayer(feats, name='input')
n = PadLayer(net_in, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u4/pad1')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u4/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u4/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u4/pad2')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u4/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u4/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u4/pad3')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u4/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u4/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u3/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad3')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u2/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad1')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b1')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad2')  # was 'u2/pad3', duplicating the pad below
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad3')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u1/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad1')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad2')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad3')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u0/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u0/pad_init')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u0/c_init')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u0/b_init')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad1')#
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c1')#
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b1')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad2')#
n = Conv2d(n, 32, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c2')#
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b2')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad3')#pad1
n = Conv2d(n, 32, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c3')#c1
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b3')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad4')#pad1
n = Conv2d(n, 3, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c4')#c1
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b4')#
return n.outputs
## for main_addhomo.py
def UNet_merged(feats, is_train=False, reuse=False, scope = 'unet_merged'):
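    """Bottleneck-only block used by main_addhomo.py: three symmetric-padded
    512-filter 3x3 conv+BN layers over the merged features, no upsampling.
    """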
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse=reuse) as vs:
net_in = InputLayer(feats, name='input')
n = PadLayer(net_in, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u4/pad1')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u4/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u4/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u4/pad2')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u4/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u4/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u4/pad3')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u4/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u4/b3')
return n.outputs
def dense_homo(feats,is_train=False, reuse=False, scope = 'dense_homo'):
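    """Regresses an 8-parameter homography from a feature map.

    Conv/pool trunk, two 1000-unit dense layers, then an 8-unit
    tanh(x/1000) head that is rescaled and offset so zero activations give
    the identity homography. A constant 1 completes the 3x3 matrix (the
    tf.matrix_inverse step is left commented out) and the first 8 entries
    are returned.
    """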
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
customTanh = lambda x: tf.tanh(x/1000)
with tf.variable_scope(scope, reuse=reuse) as vs:
net_in = InputLayer(feats, name='input')
        n = PadLayer(net_in, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_1')
        n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_1')
        n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_1')
        # note: the original padded `net_in` again in each block below, which
        # silently discarded every preceding layer; chaining `n` instead is
        # presumably what was intended.
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_2')
        n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_2')
        n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_2')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_3')
        n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_3')
        n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_3')
        n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_1')
        n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_1')
        n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_1')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_2')
        n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_2')
        n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_2')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_3')
        n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_3')
        n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_3')
        n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
print(n.outputs)
n = ReshapeLayer(n,[config.batch_size,-1],name='reshape')
print(n.outputs)
n = DenseLayer(n , n_units = 1000, act = lrelu, name='df/dense1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='df/b1')
n = DenseLayer(n, n_units = 1000, act = lrelu, name='df/dense2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='df/b2')
n = DenseLayer(n, n_units = 8, act = customTanh, name='df/dense3')
#n = tf.multiply(n.outputs, tf.constant([[0.2,0.2,0.4,0.2,0.2,0.4,0.2,0.2]],dtype=tf.float32))
#n = tf.multiply(n.outputs, tf.constant([[0.3,0.1,100,0.1,0.3,100,0.1,0.1]],dtype=tf.float32))
#n = tf.multiply(n.outputs, tf.constant([[1,3,100,3,1,100,3,3]],dtype=tf.float32))
n = tf.multiply(n.outputs, tf.constant([[0.3,0.1,170,0.1,0.3,170,0.01,0.01]],dtype=tf.float32))
n = tf.add(n, tf.constant([[1,0,0,0,1,0,0,0]],dtype=tf.float32))
print(n)
        n = tf.concat( [n, tf.constant([[1]], dtype=tf.float32)], axis=1 )  # note: [[1]] assumes batch size 1; tf.ones([tf.shape(n)[0], 1]) would generalize
print(n)
n = tf.reshape(n,[-1,3,3])
#n = tf.matrix_inverse(n)
n = tf.reshape(n,[-1,9])
n = n[:,0:8]
#n = tf.multiply(n.outputs, tf.constant([[0,0,0,0,0,0,0,0]],dtype=tf.float32))
#n = tf.add(n, tf.constant([[1,0,0.5,0,1,0.50,0,0]],dtype=tf.float32))
#updateIdx = [ [b,x] for b in range(config.batch_size) for x in range(8) ]
#updateIdxT = tf.constant(updateIdx)
#updataVal = [ [-n[b,0]+n[b,0]/10+1] for b in range(config.batch_size), \
# [-n[1]+n[1]/10] for b in range(config.batch_size), \
# [-n[2]+n[2]/2] for b in range(config.batch_size),\
# [-n[3]+n[3]/10] for b in range(config.batch_size),\
# [-n[4]+n[4]/10+1] for b in range(config.batch_size),\
# [-n[5]+n[5]/2] for b in range(config.batch_size),\
# [-n[6]+n[6]/10] for b in range(config.batch_size),\
# [-n[7]+n[7]/10] for b in range(config.batch_size) ]
#updataValT = tf.constant(updateVal)
#updateShape = tf.constant([config.batch_size,8])
#n= n + tf.scatter_nd(updateIdxT , updataValT, updateShape)
## n[0] = n[0]/10+1
## n[1] = n[1]/10
## n[2] = n[2]/2
## n[3] = n[3]/10
## n[4] = n[4]/10+1
## n[5] = n[5]/2
## n[6] = n[6]/10
## n[7] = n[7]/10
## n = tf.constant([n[:,0]/10+1,n[:,1]/10,n[:,2]/2,n[:,3]/10,n[:,4]/10+1,n[:,5]/2,n[:,6]/10,n[:,7]/10] , shape=[config.batch_size,8])
#raise
return n
def UNet_up_merged_without_u4(feats, is_train=False, reuse=False, scope = 'unet_up_without_u4'):
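    """UNet_up_merged minus the initial u4 bottleneck convs: the input
    feature map is upsampled straight away and run through the same
    256/128/64 conv ladder to a 3-channel output.
    """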
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
#g_init = tf.random_normal_initializer(1., 0.02)
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
with tf.variable_scope(scope, reuse=reuse) as vs:
net_in = InputLayer(feats, name='input')
n = UpSampling2dLayer(net_in , (2, 2), is_scale = True, method = 1, align_corners=True, name='u3/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u3/pad3')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u3/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u3/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u2/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad1')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b1')
        n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad2')  # was 'u2/pad3', duplicating the pad below
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u2/pad3')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u2/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u2/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u1/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad1')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad2')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u1/pad3')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u1/c3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u1/b3')
n = UpSampling2dLayer(n, (2, 2), is_scale = True, method = 1, align_corners=True, name='u0/u')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='u0/pad_init')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='u0/c_init')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='u0/b_init')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad1')#
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c1')#
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b1')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad2')#
n = Conv2d(n, 32, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c2')#
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b2')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad3')#pad1
n = Conv2d(n, 32, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c3')#c1
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b3')#
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='uf/pad4')#pad1
n = Conv2d(n, 3, (3, 3), (1, 1), act=None, padding='VALID', W_init=w_init_relu, name='uf/c4')#c1
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='uf/b4')#
return n.outputs
def discriminator(feats, is_train=False, reuse=False, scope = 'cnn'):
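    """VGG-style binary classifier.

    Stacks of symmetric-padded 3x3 conv+BN blocks (64 up to 512 filters)
    with 2x2 max-pooling, then dense 2048 -> 512 -> 2 and a sigmoid over
    the two logits. The flatten size 4*4*512*4 hard-codes the expected
    input resolution.
    """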
g_init = None
    lrelu = lambda x: tf.nn.relu(x)  # despite the name, this is a plain ReLU
identity = lambda x: x
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
    with tf.variable_scope(scope, reuse = reuse) as vs:  # was hard-coded to 'cnn' with reuse=False, ignoring both arguments
n = InputLayer(feats, name='input')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='1_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='1_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='1_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='1_2')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='2_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='2_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='2_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='3_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='3_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='3_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='4_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='4_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='4_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='5_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='5_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='5_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5_3')
print(n.outputs)
n = ReshapeLayer(n, [-1, 4*4*512*4])
n = DenseLayer(n , n_units=2048, act=lrelu,name='1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='1')
n = DenseLayer(n , n_units=512, act=lrelu,name='2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2')
n = DenseLayer(n , n_units=2, act=identity,name='3')
n = tf.nn.sigmoid(n.outputs)
return n
def flownetS(feats, batch_size, is_train=False, reuse=False, scope = 'flownetS' ):
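    """FlowNetSimple-style optical-flow estimator.

    Strided conv encoder (conv1..conv6_1, 64 up to 1024 filters) and a
    refinement decoder that predicts a 2-channel flow at every scale
    (predict_flow6..predict_flow2), deconvolving features and upsampled
    flow between scales. The final flow is scaled by 20 and bilinearly
    resized to 384x512, following the FlowNetS convention of supervising
    downscaled flow.
    """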
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.1)
identity = lambda x: x
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
def UpSampling2dLayer_(input, image_ref, method, align_corners, name):
input = input.outputs
size = tf.shape(image_ref)
n = InputLayer(input, name = name + '_in')
n = UpSampling2dLayer(n, size=[size[1], size[2]], is_scale = False, method = method, align_corners = align_corners, name = name)
return n
with tf.variable_scope(scope, reuse = reuse) as vs:
n = InputLayer(feats, name='input')
n = PadLayer(n, [[0, 0], [3, 3], [3, 3], [0, 0]], "constant")
n = Conv2d(n, n_filter=64, filter_size=(7, 7), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='1')
conv1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='1')
n = PadLayer(conv1, [[0, 0], [2, 2], [2, 2], [0, 0]], "constant")
n = Conv2d(n, n_filter=128, filter_size=(5, 5), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='2')
conv2 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2')
n = PadLayer(conv2, [[0, 0], [2, 2], [2, 2], [0, 0]], "constant")
n = Conv2d(n, n_filter=256, filter_size=(5, 5), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='3')
conv3 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3')
print(conv3.outputs)
n = PadLayer(conv3, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='3_1')
conv3_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3_1')
n = PadLayer(conv3_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='4')
conv4 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4')
n = PadLayer(conv4, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='4_1')
conv4_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4_1')
n = PadLayer(conv4_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='5')
conv5 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5')
n = PadLayer(conv5, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='5_1')
conv5_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5_1')
print(conv5_1.outputs)
n = PadLayer(conv5_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=1024, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='6')
conv6 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='6')
print(conv6.outputs)
n = PadLayer(conv6, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=1024, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='6_1')
conv6_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='6_1')
print(conv6_1.outputs)
n = PadLayer(conv6_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow6 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict6')
deconv5 = DeConv2dLayer(conv6_1,act=lrelu,shape=(4,4,512,1024),output_shape=(batch_size ,12,16,512),strides=(1,2,2,1),name='deconv5')
#deconv5 = DeConv2dLayer(conv6_1,act=None,shape=(4,4,512,1024),output_shape=(batch_size ,12,16,512),strides=(1,2,2,1),name='deconv5')
#deconv5 = BatchNormLayer(deconv5, act=lrelu, is_train = is_train, gamma_init = g_init,name='deconv5_bn')
upsample_flow6to5 = DeConv2dLayer(predict_flow6,act=None,shape=(4,4,2,2),output_shape=(batch_size ,12,16,2),strides=(1,2,2,1),name='upsample6_5')
concat5 = ConcatLayer([conv5_1,deconv5,upsample_flow6to5],concat_dim=3)
n = PadLayer(concat5, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow5 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict5')
deconv4 = DeConv2dLayer(concat5,act=lrelu,shape=(4,4,256,1026),output_shape=(batch_size ,24,32,256),strides=(1,2,2,1),name='deconv4')
#deconv4 = DeConv2dLayer(concat5,act=None,shape=(4,4,256,1026),output_shape=(batch_size ,24,32,256),strides=(1,2,2,1),name='deconv4')
#deconv4 = BatchNormLayer(deconv4, act=lrelu, is_train = is_train, gamma_init = g_init,name='deconv4_bn')
upsample_flow5to4 = DeConv2dLayer(predict_flow5,act= None,shape=(4,4,2,2),output_shape=(batch_size ,24,32,2),strides=(1,2,2,1),name='upsample5_4')
concat4 = ConcatLayer([conv4_1,deconv4,upsample_flow5to4],concat_dim=3)
n = PadLayer(concat4, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow4 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict4')
deconv3 = DeConv2dLayer(concat4,act=lrelu,shape=(4,4, 128,770),output_shape=(batch_size,48,64,128),strides=(1,2,2,1),name='deconv3')
#deconv3 = DeConv2dLayer(concat4,act=None,shape=(4,4, 128,770),output_shape=(batch_size,48,64,128),strides=(1,2,2,1),name='deconv3')
#deconv3 = BatchNormLayer(deconv3, act=lrelu, is_train = is_train, gamma_init = g_init,name='deconv3_bn')
upsample_flow4to3 = DeConv2dLayer(predict_flow4,act= None,shape=(4,4,2,2),output_shape=(batch_size ,48,64,2),strides=(1,2,2,1),name='upsample4_3')
concat3 = ConcatLayer([conv3_1,deconv3,upsample_flow4to3],concat_dim=3)
n = PadLayer(concat3, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow3 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict3')
deconv2 = DeConv2dLayer(concat3,act=lrelu,shape=(4,4, 64,386),output_shape=(batch_size,96,128,64),strides=(1,2,2,1),name='deconv2')
#deconv2 = DeConv2dLayer(concat3,act=None,shape=(4,4, 64,386),output_shape=(batch_size,96,128,64),strides=(1,2,2,1),name='deconv2')
#deconv2 = BatchNormLayer(deconv2, act=lrelu, is_train = is_train, gamma_init = g_init,name='deconv2_bn')
upsample_flow3to2 = DeConv2dLayer(predict_flow3,act= None,shape=(4,4,2,2),output_shape=(batch_size ,96,128,2),strides=(1,2,2,1),name='upsample3_2')
concat2 = ConcatLayer([conv2,deconv2,upsample_flow3to2],concat_dim=3)
n = PadLayer(concat2, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
#n = UpSampling2dLayer_(n, feats, method = 1, align_corners = True, name = 'upsample2_1') # added
predict_flow2 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict2')
flow = predict_flow2.outputs * tf.constant(20.0)
flow = tf.image.resize_bilinear(flow,tf.stack([384, 512]),align_corners=True)
return {'predict_flow6': predict_flow6.outputs, 'predict_flow5': predict_flow5.outputs,'predict_flow4': predict_flow4.outputs,'predict_flow3': predict_flow3.outputs,'predict_flow2': predict_flow2.outputs,'flow': flow}
def flownetS_realdata(feats, batch_size, is_train=False, reuse=False, scope = 'flownetS' ):
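    """Variant of flownetS for real data: the deconv layers use a separate
    BatchNorm instead of a fused activation, an extra upsampling to the
    input resolution is applied before predict_flow2, and the x20 flow
    scaling and fixed 384x512 resize are disabled.
    """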
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.1)
identity = lambda x: x
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()
def UpSampling2dLayer_(input, image_ref, method, align_corners, name):
input = input.outputs
size = tf.shape(image_ref)
n = InputLayer(input, name = name + '_in')
n = UpSampling2dLayer(n, size=[size[1], size[2]], is_scale = False, method = method, align_corners = align_corners, name = name)
return n
with tf.variable_scope(scope, reuse = reuse) as vs:
n = InputLayer(feats, name='input')
n = PadLayer(n, [[0, 0], [3, 3], [3, 3], [0, 0]], "constant")
n = Conv2d(n, n_filter=64, filter_size=(7, 7), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='1')
conv1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='1')
n = PadLayer(conv1, [[0, 0], [2, 2], [2, 2], [0, 0]], "constant")
n = Conv2d(n, n_filter=128, filter_size=(5, 5), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='2')
conv2 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2')
n = PadLayer(conv2, [[0, 0], [2, 2], [2, 2], [0, 0]], "constant")
n = Conv2d(n, n_filter=256, filter_size=(5, 5), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='3')
conv3 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3')
print(conv3.outputs)
n = PadLayer(conv3, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='3_1')
conv3_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3_1')
n = PadLayer(conv3_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='4')
conv4 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4')
n = PadLayer(conv4, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='4_1')
conv4_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4_1')
n = PadLayer(conv4_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='5')
conv5 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5')
n = PadLayer(conv5, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='5_1')
conv5_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5_1')
print(conv5_1.outputs)
n = PadLayer(conv5_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=1024, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='6')
conv6 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='6')
print(conv6.outputs)
n = PadLayer(conv6, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=1024, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='6_1')
conv6_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='6_1')
print(conv6_1.outputs)
n = PadLayer(conv6_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow6 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict6')
# 1/32 scale
deconv5 = DeConv2dLayer(conv6_1, act=None, shape=(4, 4, 512, 1024), output_shape=(batch_size, 12, 16, 512), strides=(1, 2, 2, 1), name='deconv5')
deconv5 = BatchNormLayer(deconv5, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv5_bn')
upsample_flow6to5 = DeConv2dLayer(predict_flow6, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 12, 16, 2), strides=(1, 2, 2, 1), name='upsample6_5')
concat5 = ConcatLayer([conv5_1, deconv5, upsample_flow6to5], concat_dim=3)
n = PadLayer(concat5, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow5 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict5')
# 1/16 scale
deconv4 = DeConv2dLayer(concat5, act=None, shape=(4, 4, 256, 1026), output_shape=(batch_size, 24, 32, 256), strides=(1, 2, 2, 1), name='deconv4')
deconv4 = BatchNormLayer(deconv4, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv4_bn')
upsample_flow5to4 = DeConv2dLayer(predict_flow5, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 24, 32, 2), strides=(1, 2, 2, 1), name='upsample5_4')
concat4 = ConcatLayer([conv4_1, deconv4, upsample_flow5to4], concat_dim=3)
n = PadLayer(concat4, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow4 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict4')
# 1/8 scale
deconv3 = DeConv2dLayer(concat4, act=None, shape=(4, 4, 128, 770), output_shape=(batch_size, 48, 64, 128), strides=(1, 2, 2, 1), name='deconv3')
deconv3 = BatchNormLayer(deconv3, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv3_bn')
upsample_flow4to3 = DeConv2dLayer(predict_flow4, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 48, 64, 2), strides=(1, 2, 2, 1), name='upsample4_3')
concat3 = ConcatLayer([conv3_1, deconv3, upsample_flow4to3], concat_dim=3)
n = PadLayer(concat3, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow3 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict3')
# 1/4 scale
deconv2 = DeConv2dLayer(concat3, act=None, shape=(4, 4, 64, 386), output_shape=(batch_size, 96, 128, 64), strides=(1, 2, 2, 1), name='deconv2')
deconv2 = BatchNormLayer(deconv2, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv2_bn')
upsample_flow3to2 = DeConv2dLayer(predict_flow3, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 96, 128, 2), strides=(1, 2, 2, 1), name='upsample3_2')
concat2 = ConcatLayer([conv2, deconv2, upsample_flow3to2], concat_dim=3)
n = PadLayer(concat2, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = UpSampling2dLayer_(n, feats, method=1, align_corners=True, name='upsample2_1')  # added: upsample to the input resolution before the final prediction
predict_flow2 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict2')
flow = predict_flow2.outputs  # * tf.constant(20.0)
return {'predict_flow6': predict_flow6.outputs,
'predict_flow5': predict_flow5.outputs,
'predict_flow4': predict_flow4.outputs,
'predict_flow3': predict_flow3.outputs,
'predict_flow2': predict_flow2.outputs,
'flow': flow}
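# flownetS_pyramid: same encoder/decoder as flownetS, but each finer flow prediction is
# refined residually by adding the upsampled coarser prediction (a spatial pyramid of
# flows). Note the default scope is also 'flownetS', so building it next to flownetS in
# the same graph requires a different `scope` (or reuse=True).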
def flownetS_pyramid(feats, batch_size, is_train=False, reuse=False, scope='flownetS'):
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.1)
identity = lambda x: x  # unused
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()  # unused
featsSize = tf.shape(feats)[0]  # dynamic batch size (currently unused)
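# Helper: resize a layer's output to the spatial size of the reference tensor
# `image_ref` (method=1 corresponds to tf.image.ResizeMethod.NEAREST_NEIGHBOR).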
def UpSampling2dLayer_(input, image_ref, method, align_corners, name):
input = input.outputs
size = tf.shape(image_ref)
n = InputLayer(input, name = name + '_in')
n = UpSampling2dLayer(n, size=[size[1], size[2]], is_scale = False, method = method, align_corners = align_corners, name = name)
return n
with tf.variable_scope(scope, reuse = reuse) as vs:
n = InputLayer(feats, name='input')
n = PadLayer(n, [[0, 0], [3, 3], [3, 3], [0, 0]], "constant")
n = Conv2d(n, n_filter=64, filter_size=(7, 7), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='1')
conv1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='1')
n = PadLayer(conv1, [[0, 0], [2, 2], [2, 2], [0, 0]], "constant")
n = Conv2d(n, n_filter=128, filter_size=(5, 5), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='2')
conv2 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='2')
n = PadLayer(conv2, [[0, 0], [2, 2], [2, 2], [0, 0]], "constant")
n = Conv2d(n, n_filter=256, filter_size=(5, 5), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='3')
conv3 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3')
n = PadLayer(conv3, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='3_1')
conv3_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='3_1')
n = PadLayer(conv3_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='4')
conv4 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4')
n = PadLayer(conv4, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='4_1')
conv4_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='4_1')
n = PadLayer(conv4_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='5')
conv5 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5')
n = PadLayer(conv5, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='5_1')
conv5_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='5_1')
n = PadLayer(conv5_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=1024, filter_size=(3, 3), strides=(2, 2), act=None, W_init=w_init_relu, padding='VALID',name='6')
conv6 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='6')
n = PadLayer(conv6, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = Conv2d(n, n_filter=1024, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID',name='6_1')
conv6_1 = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init,name='6_1')
n = PadLayer(conv6_1, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow6 = Conv2d(n, n_filter=2, filter_size=(3, 3), act=None, strides=(1, 1), W_init=w_init_relu, padding='VALID',name='predict6')
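# Refinement with a prediction pyramid: at each scale the new prediction is summed
# with copies of the upsampled coarser prediction, so finer scales only need to
# learn a residual correction (the repeated summands act as a fixed integer weight).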
# 1/32 scale
deconv5 = DeConv2dLayer(conv6_1, act=None, shape=(4, 4, 512, 1024), output_shape=(batch_size, 12, 16, 512), strides=(1, 2, 2, 1), name='deconv5')
deconv5 = BatchNormLayer(deconv5, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv5_bn')
upsample_flow6to5 = DeConv2dLayer(predict_flow6, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 12, 16, 2), strides=(1, 2, 2, 1), name='upsample6_5')
concat5 = ConcatLayer([conv5_1, deconv5, upsample_flow6to5], concat_dim=3)
n = PadLayer(concat5, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow5 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict5')
# add the upsampled coarser prediction twice (i.e. with weight 2)
up6to5 = UpSampling2dLayer(predict_flow6, size=(12, 16), is_scale=False, name='up_predict6to5')
predict_flow5 = tl.layers.ElementwiseLayer([predict_flow5, up6to5, up6to5], combine_fn=tf.add, name='fuse5')
# 1/16 scale
deconv4 = DeConv2dLayer(concat5, act=None, shape=(4, 4, 256, 1026), output_shape=(batch_size, 24, 32, 256), strides=(1, 2, 2, 1), name='deconv4')
deconv4 = BatchNormLayer(deconv4, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv4_bn')
upsample_flow5to4 = DeConv2dLayer(predict_flow5, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 24, 32, 2), strides=(1, 2, 2, 1), name='upsample5_4')
concat4 = ConcatLayer([conv4_1, deconv4, upsample_flow5to4], concat_dim=3)
n = PadLayer(concat4, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow4 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict4')
up5to4 = UpSampling2dLayer(predict_flow5, size=(24, 32), is_scale=False, name='up_predict5to4')
predict_flow4 = tl.layers.ElementwiseLayer([predict_flow4, up5to4, up5to4], combine_fn=tf.add, name='fuse4')
# 1/8 scale
deconv3 = DeConv2dLayer(concat4, act=None, shape=(4, 4, 128, 770), output_shape=(batch_size, 48, 64, 128), strides=(1, 2, 2, 1), name='deconv3')
deconv3 = BatchNormLayer(deconv3, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv3_bn')
upsample_flow4to3 = DeConv2dLayer(predict_flow4, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 48, 64, 2), strides=(1, 2, 2, 1), name='upsample4_3')
concat3 = ConcatLayer([conv3_1, deconv3, upsample_flow4to3], concat_dim=3)
n = PadLayer(concat3, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
predict_flow3 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict3')
up4to3 = UpSampling2dLayer(predict_flow4, size=(48, 64), is_scale=False, name='up_predict4to3')
predict_flow3 = tl.layers.ElementwiseLayer([predict_flow3, up4to3, up4to3], combine_fn=tf.add, name='fuse3')
# 1/4 scale
deconv2 = DeConv2dLayer(concat3, act=None, shape=(4, 4, 64, 386), output_shape=(batch_size, 96, 128, 64), strides=(1, 2, 2, 1), name='deconv2')
deconv2 = BatchNormLayer(deconv2, act=lrelu, is_train=is_train, gamma_init=g_init, name='deconv2_bn')
upsample_flow3to2 = DeConv2dLayer(predict_flow3, act=None, shape=(4, 4, 2, 2), output_shape=(batch_size, 96, 128, 2), strides=(1, 2, 2, 1), name='upsample3_2')
concat2 = ConcatLayer([conv2, deconv2, upsample_flow3to2], concat_dim=3)
n = PadLayer(concat2, [[0, 0], [1, 1], [1, 1], [0, 0]], "constant")
n = UpSampling2dLayer_(n, feats, method=1, align_corners=True, name='upsample2_1')  # added: upsample to the input resolution before the final prediction
predict_flow2 = Conv2d(n, n_filter=2, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='predict2')
# the coarser flow-3 prediction, resized to the final 382x510 map, is added eight times (effective weight 8)
upprevlayer = UpSampling2dLayer(predict_flow3, size=(382, 510), is_scale=False, name='up_predict3to2')
predict_flow2 = tl.layers.ElementwiseLayer([predict_flow2] + [upprevlayer] * 8, combine_fn=tf.add, name='fuse2')
flow = predict_flow2.outputs  # * tf.constant(20.0)
return {'predict_flow6': predict_flow6.outputs,
'predict_flow5': predict_flow5.outputs,
'predict_flow4': predict_flow4.outputs,
'predict_flow3': predict_flow3.outputs,
'predict_flow2': predict_flow2.outputs,
'flow': flow}
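# Minimal usage sketch (assumptions: TF 1.x graph mode; a 384x512 input, which matches
# the 12x16 bottleneck shapes hard-coded above; 6 input channels for a stacked frame
# pair; all names below are illustrative, not part of this file):
#   feats = tf.placeholder(tf.float32, [4, 384, 512, 6])
#   outs = flownetS_pyramid(feats, batch_size=4, is_train=True)
#   flow = outs['flow']  # 382x510x2 after the final VALID 3x3 prediction conv
# flownetS_prehomo: a VGG-style regressor over the input features that flattens and
# regresses 8 values, presumably the 8 degrees of freedom of a homography.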
def flownetS_prehomo(feats, batch_size, is_train=False, reuse=False, scope='dense_homo'):
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()  # unused
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
customTanh = lambda x: tf.tanh(x / 1000)  # unused
with tf.variable_scope(scope, reuse=reuse) as vs:
net_in = InputLayer(feats, name='input')
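# Encoder: four VGG-style stages; each stage is three [symmetric pad -> 3x3 conv -> BN+lrelu]
# blocks (64, 128, 256, 512 filters) followed by a 2x2 max-pool.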
n = PadLayer(net_in, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_1')
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_2')
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_3')
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_1')
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_2')
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_3')
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_1')
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_2')
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_3')
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_1')
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_2')
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad4_3')
n = Conv2d(n, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv4_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn4_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
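# Head: flatten, then two dense+BN blocks regress down to 8 output values.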
n = ReshapeLayer(n,[batch_size,-1],name='reshape')
n = DenseLayer(n, n_units=256, act=None, name='df/dense1')  # act=None so lrelu is applied once, by the BN below (the original applied it twice)
n = BatchNormLayer(n, act=lrelu, is_train=is_train, gamma_init=g_init, name='df/b1')
n = DenseLayer(n, n_units=128, act=None, name='df/dense2')
n = BatchNormLayer(n, act=lrelu, is_train=is_train, gamma_init=g_init, name='df/b2')
n = DenseLayer(n, n_units=8, act=None, name='df/dense3')  # 8 regressed parameters
return n.outputs
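# flownetS_variableWeight: shares the same VGG-style trunk as flownetS_prehomo and also
# defaults to scope 'dense_homo'; pass a distinct `scope` (or reuse=True) to build both
# in one graph.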
def flownetS_variableWeight(feats, batch_size, is_train=False, reuse=False, scope='dense_homo'):
w_init_relu = tf.contrib.layers.variance_scaling_initializer()
w_init_sigmoid = tf.contrib.layers.xavier_initializer()  # unused
g_init = None
lrelu = lambda x: tl.act.lrelu(x, 0.2)
customTanh = lambda x: tf.tanh(x / 1000)  # unused
with tf.variable_scope(scope, reuse=reuse) as vs:
net_in = InputLayer(feats, name='input')
n = PadLayer(net_in, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_1')
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_2')
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad1_3')
n = Conv2d(n, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv1_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn1_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_1')
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_2')
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad2_3')
n = Conv2d(n, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv2_3')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn2_3')
n = MaxPool2d(n, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_1')
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_1')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_1')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_2')
n = Conv2d(n, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init_relu, padding='VALID', name='conv3_2')
n = BatchNormLayer(n, act=lrelu, is_train = is_train, gamma_init = g_init, name='bn3_2')
n = PadLayer(n, [[0, 0], [1, 1], [1, 1], [0, 0]], "Symmetric", name='pad3_3')