import numpy as np
from backpropogation import backprop
import json
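
# `backprop` is assumed to take (network, x, y) and return a
# (nabla_b, nabla_w) pair of per-layer gradient lists, matching how
# update_mini_batch unpacks its result below.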
class Network:
    def __init__(self, size):
        self.insize = size[0]
        self.osize = size[-1]
        self.sizes = size
        self.num_layers = len(size)
        # Random initialization needs testing; zero initialization is an alternative.
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(size[:-1], size[1:])]
        self.biases = [np.random.randn(y, 1) for y in size[1:]]

    def feedforward(self, a):
        # Propagate a column-vector activation through every sigmoid layer.
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    # Needs work
    def hebbian(self, input, eta, forget):
        # Forward pass, keeping every layer's activation for the Hebbian update.
        out = []
        a = input
        out.append(a)
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
            out.append(a)
        # Hebbian weight update: delta_w = eta * post * pre^T for each layer.
        nabw = []
        for j, i in zip(out[1:], out[:-1]):
            nabw.append(np.dot(j, np.transpose(i)))
        # TODO: implement the forgetting factor (the `forget` argument is unused).
        self.weights = [w + eta * nw for w, nw in zip(self.weights, nabw)]
        # TODO: check the bias update (currently b += eta * post-activation).
        self.biases = [b + eta * nw for b, nw in zip(self.biases, out[1:])]

    # TODO: implement a Hebbian interface.
    # Reverse traversal: still needs research; the current implementation
    # only approximates inversion, since inv_sigmoid is a placeholder.
    def rev(self, op):
        # np.flip fails on ragged lists of arrays, so reverse the lists directly.
        wr = list(reversed(self.weights))
        br = list(reversed(self.biases))
        for b, w in zip(br, wr):
            op = op - b
            op = inv_sigmoid(np.dot(np.transpose(w), op))
        return op

    def QS(self, data, fqu, eta):
        # Repeat gradient-descent updates on the same batch `fqu` times.
        # TODO: implement a little deviator.
        for j in range(fqu):
            self.update_mini_batch(data, eta)

    def update_mini_batch(self, mini_batch, eta):
        # One step of gradient descent over a mini-batch of (x, y) pairs.
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = backprop(self, x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def evaluate(self, test_data):
        # Count test inputs whose highest-activation output matches the label.
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        return (output_activations - y)

    def save(self):
        # Return a JSON-serializable snapshot of the network.
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases]}
        return data

    def load(self, data):
        # Restore weights and biases from a snapshot produced by save().
        self.weights = [np.array(w) for w in data["weights"]]
        self.biases = [np.array(b) for b in data["biases"]]

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))


# Left open for discussion: the exact inverse of sigmoid is the logit,
# log(z / (1 - z)); for now this placeholder just applies sigmoid again.
def inv_sigmoid(z):
    return sigmoid(z)
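

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the layer sizes and inputs are
# arbitrary example values). The import at the top still requires the
# companion backpropogation module; the demo itself sticks to feedforward,
# hebbian, and the save/load round trip, which need no gradients.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    net = Network([2, 3, 1])
    x = np.random.randn(2, 1)              # a single column-vector input
    print(net.feedforward(x))              # forward pass through the layers
    net.hebbian(x, eta=0.1, forget=0.0)    # one unsupervised Hebbian step
    snapshot = json.dumps(net.save())      # serialize weights/biases to JSON
    restored = Network([2, 3, 1])
    restored.load(json.loads(snapshot))    # restore into a fresh network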