Remove tl.layers.initialize_global_variables(sess) (#931)
* update sampling layers

* update zoom

* fix zoom bug

* fix typo

* fix affine_transform_cv2 x and y bug

* fix crop bug when crop size equals image size

* fix docs file typo

* fix instance norm bug

* fix docs

* update examples, init variables

* update changelog
zsdonghao authored Jan 16, 2019
1 parent 3405b11 commit 94d20f7
Showing 31 changed files with 40 additions and 41 deletions.
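The change itself is mechanical: each call to the removed TensorLayer helper becomes a direct call to TensorFlow's initializer op. A minimal before/after sketch of how downstream code migrates (assuming TensorFlow 1.x, as used throughout these examples; the session setup is illustrative and not taken from the diff):

```python
import tensorflow as tf

sess = tf.InteractiveSession()

# Before this commit (helper now removed from tl.layers):
# tl.layers.initialize_global_variables(sess)

# After this commit, run TensorFlow's initializer op directly:
sess.run(tf.global_variables_initializer())
```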
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -72,6 +72,7 @@ To release a new version, please update the changelog as followed:
### Added

### Changed
+ - remove `tl.layers.initialize_global_variables(sess)` (PR #931)

### Dependencies Update
- nltk>=3.3,<3.4 => nltk>=3.3,<3.5 (PR #892)
@@ -87,6 +88,7 @@ To release a new version, please update the changelog as followed:
### Security

### Contributors
+ @zsdonghao: #931

## [1.11.1] - 2018-11-15

4 changes: 2 additions & 2 deletions docs/modules/layers.rst
@@ -39,7 +39,7 @@ All TensorLayer layers have a number of properties in common:

All TensorLayer layers have a number of methods in common:

- - ``layer.print_params()`` : print network variable information in order (after ``tl.layers.initialize_global_variables(sess)``). alternatively, print all variables by ``tl.layers.print_all_variables()``.
+ - ``layer.print_params()`` : print network variable information in order (after ``sess.run(tf.global_variables_initializer())``). alternatively, print all variables by ``tl.layers.print_all_variables()``.
- ``layer.print_layers()`` : print network layer information in order.
- ``layer.count_params()`` : print the number of parameters in the network.

@@ -89,7 +89,7 @@ To count the number of parameters in a network, run ``network.count_params()``.
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-08, use_locking=False).minimize(cost, var_list = train_params)
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
network.print_params()
network.print_layers()
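For completeness, a slightly fuller sketch of the inspection methods the layers docs describe, using the new initialization call (the small network is hypothetical and only for illustration, assuming the TensorLayer 1.x API used in these tutorials):

```python
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.DenseLayer(net, n_units=800, act=tf.nn.relu, name='relu1')
net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, name='output')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())  # replaces tl.layers.initialize_global_variables(sess)

net.print_params()           # variable shapes and values, in layer order
net.print_layers()           # layer outputs, in order
print(net.count_params())    # total number of parameters
```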
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_cifar10_placeholder.py
@@ -131,7 +131,7 @@ def distort_fn(x, is_train=False):
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
use_locking=False).minimize(cost, var_list=train_params)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

network.print_params(False)
network.print_layers()
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_cifar10_tfrecord.py
@@ -277,7 +277,7 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
if resume:
print("Load existing model " + "!" * 10)
saver = tf.train.Saver()
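Several tutorials above pair the initializer with an optional checkpoint restore; a hedged sketch of that ordering (the variable, the `resume` flag, and the checkpoint path are placeholders, not taken from the diff):

```python
import tensorflow as tf

w = tf.Variable(tf.zeros([10]), name='w')  # stand-in for the network parameters
saver = tf.train.Saver()
resume = False              # assumed flag, mirroring the tutorials
ckpt_path = 'model.ckpt'    # hypothetical checkpoint path

sess = tf.Session()
# Initialize everything first, so a fresh run starts from scratch...
sess.run(tf.global_variables_initializer())
if resume:
    print("Load existing model " + "!" * 10)
    # ...then overwrite the freshly initialized values with the saved ones.
    saver.restore(sess, ckpt_path)
```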
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mlp_dropout1.py
@@ -37,7 +37,7 @@
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# print network information
network.print_params()
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mlp_dropout2.py
@@ -46,7 +46,7 @@ def mlp(x, is_train=True, reuse=False):
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

n_epoch = 500
batch_size = 500
8 changes: 4 additions & 4 deletions examples/basic_tutorials/tutorial_mnist_autoencoder_cnn.py
@@ -76,7 +76,7 @@ def main_test_layers(model='relu'):
print_freq = 5
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

net.print_params()
net.print_layers()
@@ -179,7 +179,7 @@ def main_test_denoise_AE(model='relu'):
recon_layer1 = tl.layers.ReconLayer(net, x_recon=x, n_units=784, act=tf.nn.sigmoid, name='recon_layer1')

# ready to train
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# print all params
print("All net Params")
@@ -253,7 +253,7 @@ def main_test_stacked_denoise_AE(model='relu'):
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)

# Initialize all variables including weights, biases and the variables in train_op
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# Pre-train
print("\nAll net Params before pre-train")
@@ -417,7 +417,7 @@ def main_test_cnn_layer():
train_params = net.all_params
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
net.print_params()
net.print_layers()

2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_float16.py
@@ -64,7 +64,7 @@ def model(x, is_train=True, reuse=False):
use_locking=False).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# train the network
n_epoch = 500
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_simple.py
@@ -39,7 +39,7 @@
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# print network information
network.print_params()
2 changes: 1 addition & 1 deletion examples/database/task_script.py
@@ -53,7 +53,7 @@ def mlp(x, is_train=True, reuse=False):
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# train the network
tl.utils.fit(
2 changes: 1 addition & 1 deletion examples/keras_tfslim/tutorial_keras.py
@@ -47,7 +47,7 @@ def keras_block(x):
train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

for epoch in range(n_epoch):
start_time = time.time()
2 changes: 1 addition & 1 deletion examples/keras_tfslim/tutorial_tfslim.py
@@ -48,7 +48,7 @@ def slim_block(x):
train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

for epoch in range(n_epoch):
start_time = time.time()
2 changes: 1 addition & 1 deletion examples/pretrained_cnn/tutorial_inceptionV3_tfslim.py
@@ -94,7 +94,7 @@ def print_prob(prob):
# )
# sess = tf.InteractiveSession()
# # sess.run(tf.global_variables_initializer())
- # tl.layers.initialize_global_variables(sess)
+ # sess.run(tf.global_variables_initializer())
# network.print_params()

## InceptionV3 / All TF-Slim nets can be merged into TensorLayer
2 changes: 1 addition & 1 deletion examples/pretrained_cnn/tutorial_mobilenet.py
@@ -101,7 +101,7 @@ def mobilenet(x, is_train=True, reuse=False):
n.print_params(False)

sess = tf.InteractiveSession()
- # tl.layers.initialize_global_variables(sess)
+ # sess.run(tf.global_variables_initializer())

if not os.path.isfile(MODEL_PATH):
raise Exception("Please download mobilenet.npz from : https://github.com/tensorlayer/pretrained-models")
2 changes: 1 addition & 1 deletion examples/pretrained_cnn/tutorial_vgg16.py
@@ -122,7 +122,7 @@ def fc_layers(net):
# correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.float32), tf.cast(y_, tf.float32))
# acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
net.print_params()
net.print_layers()

2 changes: 1 addition & 1 deletion examples/pretrained_cnn/tutorial_vgg19.py
@@ -212,7 +212,7 @@ def Vgg19_simple_api(rgb):
net = Vgg19_simple_api(x)
y = net.outputs
probs = tf.nn.softmax(y, name="prob")
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# You need to download the pre-trained model - VGG19 NPY
if not os.path.isfile(MODEL_PATH):
@@ -211,7 +211,7 @@ def model(x_crop, y_, reuse):
with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
if resume:
print("Load existing model " + "!" * 10)
saver = tf.train.Saver()
2 changes: 1 addition & 1 deletion examples/quantized_net/tutorial_binarynet_mnist_cnn.py
@@ -65,7 +65,7 @@ def model(x, is_train=True, reuse=False):
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

net_train.print_params()
net_train.print_layers()
@@ -207,7 +207,7 @@ def model(x_crop, y_, reuse):
with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
if resume:
print("Load existing model " + "!" * 10)
saver = tf.train.Saver()
2 changes: 1 addition & 1 deletion examples/quantized_net/tutorial_dorefanet_mnist_cnn.py
@@ -65,7 +65,7 @@ def model(x, is_train=True, reuse=False):
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

net_train.print_params()
net_train.print_layers()
@@ -206,7 +206,7 @@ def model(x_crop, y_, reuse):
with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
if resume:
print("Load existing model " + "!" * 10)
saver = tf.train.Saver()
2 changes: 1 addition & 1 deletion examples/quantized_net/tutorial_ternaryweight_mnist_cnn.py
@@ -65,7 +65,7 @@ def model(x, is_train=True, reuse=False):
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

net_train.print_params()
net_train.print_layers()
2 changes: 1 addition & 1 deletion examples/reinforcement_learning/tutorial_atari_pong.py
@@ -77,7 +77,7 @@ def prepro(I):
train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)

with tf.Session() as sess:
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
# if resume:
# load_params = tl.files.load_npz(name=model_file_name+'.npz')
# tl.files.assign_params(sess, load_params, network)
@@ -263,7 +263,7 @@ def work(self):
workers.append(Worker(i_name, GLOBAL_AC))

COORD = tf.train.Coordinator()
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

# start TF threading
worker_threads = []
@@ -278,7 +278,7 @@ def work(self):
# ============================= EVALUATION =============================
# env = gym.make(GAME)
# GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)
- # tl.layers.initialize_global_variables(sess)
+ # sess.run(tf.global_variables_initializer())
# GLOBAL_AC.load_ckpt()
# while True:
# s = env.reset()
2 changes: 1 addition & 1 deletion examples/reinforcement_learning/tutorial_cartpole_ac.py
@@ -151,7 +151,7 @@ def learn(self, s, r, s_):
# we need a good teacher, so the teacher should learn faster than the actor
critic = Critic(sess, n_features=N_F, lr=LR_C)

- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

if OUTPUT_GRAPH:
tf.summary.FileWriter("logs/", sess.graph)
2 changes: 1 addition & 1 deletion examples/reinforcement_learning/tutorial_frozenlake_dqn.py
@@ -68,7 +68,7 @@ def to_one_hot(i, n_classes=None):
e = 0.1 # e-Greedy Exploration, the larger the more random
num_episodes = 10000
with tf.Session() as sess:
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
for i in range(num_episodes):
## Reset environment and get first new observation
episode_time = time.time()
2 changes: 1 addition & 1 deletion examples/text_classification/tutorial_imdb_fasttext.py
@@ -129,7 +129,7 @@ def train_test_and_save_model():
)

with tf.Session() as sess:
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

for epoch in range(N_EPOCH):
start_time = time.time()
4 changes: 2 additions & 2 deletions examples/text_generation/tutorial_generate_text.py
@@ -157,7 +157,7 @@ def main_restore_embedding_layer():
emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='emb')

# sess.run(tf.global_variables_initializer())
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

tl.files.assign_params(sess, [load_params[0]], emb_net)

@@ -283,7 +283,7 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
train_op = optimizer.apply_gradients(zip(grads, tvars))

# ===== Training
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

print("\nStart learning a model to generate text")
for i in range(max_max_epoch):
5 changes: 2 additions & 3 deletions examples/text_ptb/tutorial_ptb_lstm.py
@@ -246,7 +246,7 @@ def inference(x, is_training, num_steps, reuse=None):
net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)

# sess.run(tf.global_variables_initializer())
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

def loss_fn(outputs, targets): # , batch_size, num_steps):
# See tl.cost.cross_entropy_seq()
@@ -280,8 +280,7 @@ def loss_fn(outputs, targets): # , batch_size, num_steps):
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))

- # sess.run(tf.global_variables_initializer())
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

net.print_params()
net.print_layers()
4 changes: 2 additions & 2 deletions examples/text_ptb/tutorial_ptb_lstm_state_is_tuple.py
@@ -252,7 +252,7 @@ def inference(x, is_training, num_steps, reuse=None):
net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)

# sess.run(tf.global_variables_initializer())
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

def loss_fn(outputs, targets, batch_size):
# See tl.cost.cross_entropy_seq()
@@ -287,7 +287,7 @@ def loss_fn(outputs, targets, batch_size):
train_op = optimizer.apply_gradients(zip(grads, tvars))

# sess.run(tf.global_variables_initializer())
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())

net.print_params()
net.print_layers()
4 changes: 1 addition & 3 deletions examples/text_word_embedding/tutorial_word2vec_basic.py
@@ -218,9 +218,7 @@ def main_word2vec_basic():
# transpose_b=True, normalized_embeddings is transposed before multiplication.

# Step 5: Start training.
- print()
-
- tl.layers.initialize_global_variables(sess)
+ sess.run(tf.global_variables_initializer())
if resume:
print("Load existing model" + "!" * 10)
# Load from ckpt or npz file