# -*- coding: utf-8 -*-
"""
Created on Sat Jul 15 12:41:47 2017
@author: Pavitrakumar
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"

import numpy as np
from keras.layers import Input, Dense
from keras.layers.core import Activation, Flatten, Dropout
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model
from keras.optimizers import Adam
from keras.initializers import RandomNormal
import keras.backend as K
# Custom layers referenced by the commented-out upsampling variants below.
from misc_layers import MinibatchDiscrimination, SubPixelUpscaling, CustomLRELU, bilinear2x

K.set_image_data_format('channels_last')
#K.set_learning_phase(1)

# DCGAN guideline: BatchNormalization on all layers except the generator
# output layer and the discriminator input layer.
np.random.seed(42)
def get_gen_normal(noise_shape):
    """
    DCGAN-style generator: upsamples a (1,1,N) noise tensor to a 64x64x3
    image with strided Conv2DTranspose layers.
    Note: changing padding to 'same' in the first layer makes a lot of difference!
    """
    #kernel_init = RandomNormal(mean=0.0, stddev=0.01)
    kernel_init = 'glorot_uniform'

    gen_input = Input(shape = noise_shape) #noise goes directly into the first conv layer
    #gen_input = Input(shape = [noise_shape]) #use this form when a dense layer comes next

    # (1,1,N) -> (4,4,512)
    generator = Conv2DTranspose(filters = 512, kernel_size = (4,4), strides = (1,1), padding = "valid", data_format = "channels_last", kernel_initializer = kernel_init)(gen_input)
    generator = BatchNormalization(momentum = 0.5)(generator)
    generator = LeakyReLU(0.2)(generator)

    # (4,4,512) -> (8,8,256); alternative upsampling variants kept for reference
    #generator = bilinear2x(generator,256,kernel_size=(4,4))
    #generator = UpSampling2D(size=(2, 2))(generator)
    #generator = SubPixelUpscaling(scale_factor=2)(generator)
    #generator = Conv2D(filters = 256, kernel_size = (4,4), strides = (1,1), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = Conv2DTranspose(filters = 256, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = BatchNormalization(momentum = 0.5)(generator)
    generator = LeakyReLU(0.2)(generator)

    # (8,8,256) -> (16,16,128)
    #generator = bilinear2x(generator,128,kernel_size=(4,4))
    #generator = UpSampling2D(size=(2, 2))(generator)
    #generator = SubPixelUpscaling(scale_factor=2)(generator)
    #generator = Conv2D(filters = 128, kernel_size = (4,4), strides = (1,1), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = Conv2DTranspose(filters = 128, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = BatchNormalization(momentum = 0.5)(generator)
    generator = LeakyReLU(0.2)(generator)

    # (16,16,128) -> (32,32,64)
    #generator = bilinear2x(generator,64,kernel_size=(4,4))
    #generator = UpSampling2D(size=(2, 2))(generator)
    #generator = SubPixelUpscaling(scale_factor=2)(generator)
    #generator = Conv2D(filters = 64, kernel_size = (4,4), strides = (1,1), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = Conv2DTranspose(filters = 64, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = BatchNormalization(momentum = 0.5)(generator)
    generator = LeakyReLU(0.2)(generator)

    # extra 3x3 conv block; spatial size stays (32,32,64)
    generator = Conv2D(filters = 64, kernel_size = (3,3), strides = (1,1), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = BatchNormalization(momentum = 0.5)(generator)
    generator = LeakyReLU(0.2)(generator)

    # (32,32,64) -> (64,64,3); no BatchNormalization on the output layer
    #generator = bilinear2x(generator,3,kernel_size=(3,3))
    #generator = UpSampling2D(size=(2, 2))(generator)
    #generator = SubPixelUpscaling(scale_factor=2)(generator)
    #generator = Conv2D(filters = 3, kernel_size = (4,4), strides = (1,1), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = Conv2DTranspose(filters = 3, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(generator)
    generator = Activation('tanh')(generator)

    gen_opt = Adam(lr=0.00015, beta_1=0.5)
    generator_model = Model(inputs = gen_input, outputs = generator)
    generator_model.compile(loss='binary_crossentropy', optimizer=gen_opt, metrics=['accuracy'])
    generator_model.summary()
    return generator_model
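# Usage sketch (not part of the original file): sampling from the generator,
# assuming the (1,1,100) noise shape this architecture is built around.
#   generator = get_gen_normal((1, 1, 100))
#   noise = np.random.normal(0, 1, size=(16, 1, 1, 100))
#   fake_images = generator.predict(noise)   # (16, 64, 64, 3), values in [-1, 1]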
#------------------------------------------------------------------------------------------
def get_disc_normal(image_shape=(64,64,3)):
    """
    DCGAN-style discriminator: strided Conv2D layers downsample a 64x64x3
    image to a single sigmoid real/fake score.
    """
    dropout_prob = 0.4 #used only by the commented-out Dropout variants below
    #kernel_init = RandomNormal(mean=0.0, stddev=0.01)
    kernel_init = 'glorot_uniform'

    dis_input = Input(shape = image_shape)

    # (64,64,3) -> (32,32,64); no BatchNormalization on the input layer
    discriminator = Conv2D(filters = 64, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(dis_input)
    discriminator = LeakyReLU(0.2)(discriminator)
    #discriminator = MaxPooling2D(pool_size=(2, 2))(discriminator)
    #discriminator = Dropout(dropout_prob)(discriminator)

    # (32,32,64) -> (16,16,128)
    discriminator = Conv2D(filters = 128, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(discriminator)
    discriminator = BatchNormalization(momentum = 0.5)(discriminator)
    discriminator = LeakyReLU(0.2)(discriminator)
    #discriminator = MaxPooling2D(pool_size=(2, 2))(discriminator)
    #discriminator = Dropout(dropout_prob)(discriminator)

    # (16,16,128) -> (8,8,256)
    discriminator = Conv2D(filters = 256, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(discriminator)
    discriminator = BatchNormalization(momentum = 0.5)(discriminator)
    discriminator = LeakyReLU(0.2)(discriminator)
    #discriminator = MaxPooling2D(pool_size=(2, 2))(discriminator)
    #discriminator = Dropout(dropout_prob)(discriminator)

    # (8,8,256) -> (4,4,512)
    discriminator = Conv2D(filters = 512, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(discriminator)
    discriminator = BatchNormalization(momentum = 0.5)(discriminator)
    discriminator = LeakyReLU(0.2)(discriminator)
    #discriminator = MaxPooling2D(pool_size=(2, 2))(discriminator)

    discriminator = Flatten()(discriminator)
    #discriminator = MinibatchDiscrimination(100,5)(discriminator)
    discriminator = Dense(1)(discriminator)
    discriminator = Activation('sigmoid')(discriminator)

    dis_opt = Adam(lr=0.0002, beta_1=0.5)
    discriminator_model = Model(inputs = dis_input, outputs = discriminator)
    discriminator_model.compile(loss='binary_crossentropy', optimizer=dis_opt, metrics=['accuracy'])
    discriminator_model.summary()
    return discriminator_model
#------------------------------------------------------------------------------------------
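# Usage sketch (not part of the original file): wiring the two networks into
# the combined model a standard DCGAN training loop uses for generator
# updates. The (1,1,100) noise shape and the 0.00015 learning rate are
# assumptions mirroring the generator settings above.
if __name__ == "__main__":
    noise_shape = (1, 1, 100)
    generator = get_gen_normal(noise_shape)
    discriminator = get_disc_normal((64, 64, 3))

    # Freeze the discriminator inside the combined model so that training the
    # stacked model only updates the generator's weights.
    discriminator.trainable = False
    gan_input = Input(shape = noise_shape)
    gan_output = discriminator(generator(gan_input))
    gan = Model(inputs = gan_input, outputs = gan_output)
    gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.00015, beta_1=0.5))
    gan.summary()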