# ops.py -- forked from hardmaru/cppn-gan-vae-tensorflow
import math

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

class batch_norm(object):
    """Batch normalization. Code modification of http://stackoverflow.com/a/33950177

    Note: statistics are always computed from the current batch; the `train`
    flag and the ExponentialMovingAverage below are kept for interface
    compatibility but are not actually used to track running averages.
    """
    def __init__(self, batch_size, epsilon=1e-5, momentum=0.1, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.batch_size = batch_size
            self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)
            self.name = name

    def __call__(self, x, train=True):
        # Assumes a 4-D NHWC tensor: moments are taken over batch, height, width.
        shape = x.get_shape().as_list()
        with tf.variable_scope(self.name):
            self.gamma = tf.get_variable("gamma", [shape[-1]],
                                         initializer=tf.random_normal_initializer(1., 0.02))
            self.beta = tf.get_variable("beta", [shape[-1]],
                                        initializer=tf.constant_initializer(0.))
            self.mean, self.variance = tf.nn.moments(x, [0, 1, 2])
            return tf.nn.batch_norm_with_global_normalization(
                x, self.mean, self.variance, self.beta, self.gamma, self.epsilon,
                scale_after_normalization=True)
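
# A minimal usage sketch (names here are illustrative, not from this repo);
# assumes a 4-D NHWC activation tensor in TF 0.x-style graph mode:
#
#   d_bn1 = batch_norm(batch_size=64, name="d_bn1")
#   h1 = lrelu(d_bn1(conv2d(h0, 128, name="d_h1_conv")))
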
def binary_cross_entropy_with_logits(logits, targets, name=None):
    """Computes binary cross entropy given `logits`.

    For brevity, let `x = logits`, `z = targets`. The loss computed is

        loss(x, z) = -mean_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))

    Note: despite the name, both inputs are expected to be probabilities in
    [0, 1] (e.g. sigmoid outputs), not unscaled logits; `eps` guards log(0).

    Args:
      logits: A `Tensor` of type `float32` or `float64`.
      targets: A `Tensor` of the same type and shape as `logits`.
    """
    eps = 1e-12
    with ops.op_scope([logits, targets], name, "bce_loss") as name:
        logits = ops.convert_to_tensor(logits, name="logits")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(logits * tf.log(targets + eps) +
                                (1. - logits) * tf.log(1. - targets + eps)))
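
# Hedged call-pattern sketch (illustrative names): both arguments should be
# probabilities, e.g. sigmoid outputs:
#
#   p = tf.nn.sigmoid(d_logits)                        # predictions in (0, 1)
#   loss = binary_cross_entropy_with_logits(labels, p)
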
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis.

    `y` (shape [batch, 1, 1, c]) is tiled across the spatial dimensions of `x`
    by multiplying with a ones tensor, then concatenated on the channel axis
    (axis 3; this uses the pre-TF-1.0 `tf.concat(axis, values)` argument order).
    """
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])
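
# Shape sketch (illustrative values): concatenating a 10-way one-hot condition
# onto a feature map.
#
#   x = ...                                  # [64, 16, 16, 128] feature map
#   y = tf.reshape(labels, [64, 1, 1, 10])   # conditioning vector
#   h = conv_cond_concat(x, y)               # -> [64, 16, 16, 138]
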
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv
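
# Example (illustrative): with the default 5x5 kernel, stride 2, and 'SAME'
# padding, each spatial dimension halves, e.g. [64, 28, 28, 1] -> [64, 14, 14, 32]:
#
#   h0 = lrelu(conv2d(images, 32, name="d_h0_conv"))
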
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))
        # tf.nn.deconv2d was later renamed tf.nn.conv2d_transpose in TensorFlow.
        deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        if with_w:
            return deconv, w, biases
        else:
            return deconv
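
# Example (illustrative): the caller supplies the full output shape, so with
# stride 2 the spatial size doubles, e.g. [64, 7, 7, 128] -> [64, 14, 14, 64]:
#
#   h1 = deconv2d(h0, [64, 14, 14, 64], name="g_h1")
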
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU, written as f1*x + f2*|x|."""
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        return f1 * x + f2 * abs(x)
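
# Equivalence check (sketch): for x > 0 this returns x, and for x < 0 it
# returns leak * x, i.e. the same function as tf.maximum(x, leak * x),
# expressed with elementwise arithmetic only.
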
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
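
# Usage sketch (illustrative names): project a latent vector, optionally
# returning the weight matrix and bias as well:
#
#   z_proj = linear(z, 7 * 7 * 128, scope="g_h0_lin")
#   out, w, b = linear(h, 1, scope="d_out_lin", with_w=True)
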
def fully_connected(input_, output_size, scope=None, stddev=0.1, with_bias=True):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "FC"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        result = tf.matmul(input_, matrix)
        if with_bias:
            # Unlike linear(), the bias is randomly initialized; multiplying by
            # a [batch, 1] ones tensor tiles the [1, output_size] bias across
            # the batch explicitly.
            bias = tf.get_variable("bias", [1, output_size],
                                   initializer=tf.random_normal_initializer(stddev=stddev))
            result += bias * tf.ones([shape[0], 1], dtype=tf.float32)
        return result
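
# Usage sketch (illustrative names): like linear(), but with a random bias and
# a larger default stddev:
#
#   h = tf.nn.tanh(fully_connected(coords, 32, scope="fc1"))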