nn_gpu_new.py
# -*- coding: utf-8 -*-
# Version adapted to the newer Chainer (Chain/Link) coding style
import time
from chainer import Chain, Variable, optimizers, cuda
import chainer.links as L
import chainer.functions as F
from chainer.cuda import cupy
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.externals import joblib
import numpy as np
# Load the data, already split into training and test sets
data_train, data_test, label_train, label_test = joblib.load("mnist")
data_train = np.asarray(data_train, np.float32)
data_test = np.asarray(data_test, np.float32)
label_train = np.asarray(label_train, np.int32)
label_test = np.asarray(label_test, np.int32)
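# (Assumption, not stated in the file) the "mnist" dump is expected to hold
# flattened 28x28 images as float32 rows of length 784 plus integer class labels,
# matching the 784-unit input layer defined below.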
# Network model
class MyNN(Chain):
    # Layer parameters
    def __init__(self):
        super(MyNN, self).__init__(
            l1=L.Linear(784, 200),
            l2=L.Linear(200, 100),
            l3=L.Linear(100, 10))
        self.is_train = True

    # How the forward pass is computed
    def __call__(self, x):
        h1 = F.dropout(F.relu(self.l1(x)), train=self.is_train)
        h2 = F.dropout(F.relu(self.l2(h1)), train=self.is_train)
        p = self.l3(h2)
        return p
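# The network is a 3-layer MLP: 784 inputs -> 200 -> 100 -> 10 class scores,
# with ReLU activations and dropout (Chainer's default ratio of 0.5) on both hidden layers.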
network = MyNN()
model = L.Classifier(network)
model.compute_accuracy = True
# When using the GPU
model.to_gpu()
xp = cupy
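# To run without a GPU, one could instead skip model.to_gpu() and use NumPy as the
# array module (a hedged alternative, not part of the original script):
# xp = np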
# How the model is trained
optimizer = optimizers.Adam()
optimizer.setup(model)
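# Adam is used with its default hyperparameters; another optimizer such as plain SGD
# could be set up the same way (illustrative alternative only, not in the original):
# optimizer = optimizers.SGD(lr=0.01)
# optimizer.setup(model)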
# 学習
n_epoch = 100 # 学習繰り返し回数
batchsize = 20 # 学習データの分割サイズ
N = len(data_train)
losses = [] # 各回での誤差の変化を記録するための配列
start = time.time() # 処理時間の計測開始
for epoch in range(n_epoch):
    print('epoch: %d' % (epoch+1))
    perm = np.random.permutation(N)  # shuffle so the mini-batches differ each epoch
    sum_accuracy = 0
    sum_loss = 0
    for i in range(0, N, batchsize):
        # Train on the shuffled samples i to i+batchsize
        x_batch = data_train[perm[i:i+batchsize]]
        t_batch = label_train[perm[i:i+batchsize]]
        # Reset the gradients
        optimizer.zero_grads()
        # Forward pass
        x = Variable(xp.asarray(x_batch))
        t = Variable(xp.asarray(t_batch))
        loss = model(x, t)
        # Backpropagation
        loss.backward()
        # Accuracy of this mini-batch (for display)
        accuracy = model.accuracy
        # Update the parameters
        optimizer.update()
        sum_loss += float(loss.data) * batchsize
        sum_accuracy += float(accuracy.data) * batchsize
    losses.append(sum_loss / N)
    print("loss: %f, accuracy: %f" % (sum_loss / N, sum_accuracy / N))
training_time = time.time() - start
joblib.dump((model, training_time, losses), "classifiers/"+"nn_cpu")
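# Note: the model still holds GPU (cupy) arrays at this point; if the dump needs to be
# loaded on a machine without a GPU, calling model.to_cpu() before dumping would be
# safer (an assumption about the intended use, not part of the original).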
# Evaluation
start = time.time()
x_test = Variable(xp.asarray(data_test))
network.is_train = False  # disable dropout at evaluation time
result_scores = cuda.to_cpu(network(x_test).data)
predict_time = time.time() - start
results = np.argmax(result_scores, axis=1)
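# argmax over the 10 output scores of each row gives the predicted digit class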
# %%
# Compute the recognition accuracy
score = accuracy_score(label_test, results)
print(training_time, predict_time)
print(score)
cmatrix = confusion_matrix(label_test, results)
print(cmatrix)
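# Rows of the confusion matrix are the true digits, columns the predicted ones;
# the diagonal counts the correctly classified test samples.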
joblib.dump((training_time, predict_time, score, cmatrix), "results/"+"nn_cpu")