''' Logistic Regression with Eager API.
A logistic regression learning algorithm example using TensorFlow's Eager API.
This example uses the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import absolute_import, division, print_function
import tensorflow as tf
# Set Eager API
tf.enable_eager_execution()
tfe = tf.contrib.eager
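# Note: eager execution must be enabled once, at program startup, before any
# other TensorFlow operations run; tf.contrib.eager exposes the eager-mode
# helpers (Variable, Iterator, implicit_gradients) used below.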
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Parameters
learning_rate = 0.1
batch_size = 128
num_steps = 1000
display_step = 100
dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels))
dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
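# repeat() cycles through the data indefinitely, batch() groups examples into
# mini-batches of `batch_size`, and prefetch() prepares batches ahead of the
# training loop so the input pipeline does not stall it.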
dataset_iter = tfe.Iterator(dataset)
# Variables
W = tfe.Variable(tf.zeros([784, 10]), name='weights')
b = tfe.Variable(tf.zeros([10]), name='bias')
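# W maps a flattened 28x28 image (784 pixels) to 10 class scores. Zero
# initialization is fine here: a single linear layer has no symmetry to break.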
# Logistic regression (Wx + b)
def logistic_regression(inputs):
    return tf.matmul(inputs, W) + b
# Cross-Entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse_softmax cross entropy
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))
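# The "sparse" variant takes integer class indices directly, which is why the
# dataset is loaded with one_hot=False and the labels are cast to int64 below.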
# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# SGD Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
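# Plain SGD: each apply_gradients() call updates every variable as
# var -= learning_rate * gradient.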
# Compute gradients
grad = tfe.implicit_gradients(loss_fn)
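# implicit_gradients wraps loss_fn so that grad(...) returns a list of
# (gradient, variable) pairs for every trainable variable the loss touches
# (here W and b), which is the format apply_gradients expects.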
# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):

    # Iterate through the dataset
    d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(logistic_regression, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following gradients info
    optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.
# Evaluate model on the test image set
testX = mnist.test.images
testY = mnist.test.labels
test_acc = accuracy_fn(logistic_regression, testX, testY)
print("Testset Accuracy: {:.4f}".format(test_acc))