
Commit d7a94a1

add a framework of bp neural network and delete the old one
1 parent a03b2ea commit d7a94a1

File tree

2 files changed: +190 −152 lines changed

Neural_Network/bpnn.py

+190
@@ -0,0 +1,190 @@
'''
A framework for a back-propagation (BP) neural network model.

Easy to use:
    * add as many layers as you want
    * clearly see how the loss decreases
Easy to expand:
    * more activation functions
    * more loss functions
    * more optimization methods

Author: Stephen Lee
Github: https://github.com/RiptideBo
Date: 2017.11.23
'''

import numpy as np
import matplotlib.pyplot as plt


def sigmoid(x):
    return 1 / (1 + np.exp(-1 * x))
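
# The docstring above promises "more activation functions"; here is a sketch of
# how one could be added (an illustration, not part of the original commit).
# Note that DenseLayer.cal_gradient below only knows sigmoid's derivative and
# falls back to 1 for any other activation, so a new function needs a matching
# branch there before it is useful in training.
def tanh(x):
    return np.tanh(x)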

class DenseLayer():
    '''
    Layers of the BP neural network
    '''
    def __init__(self, units, activation=None, learning_rate=None, is_input_layer=False):
        '''
        Fully connected layer of the BP network
        :param units: number of neural units
        :param activation: activation function
        :param learning_rate: learning rate for the parameters
        :param is_input_layer: whether it is the input layer or not
        '''
        self.units = units
        self.weight = None
        self.bias = None
        self.activation = activation
        if learning_rate is None:
            learning_rate = 0.3
        self.learn_rate = learning_rate
        self.is_input_layer = is_input_layer

    def initializer(self, back_units):
        # weights and biases are drawn from N(0, 0.5); back_units is the size
        # of the previous layer, i.e. the fan-in of this layer
        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units)))
        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T
        if self.activation is None:
            self.activation = sigmoid

    def cal_gradient(self):
        # Jacobian of the activation: for sigmoid this is the diagonal matrix
        # diag(output * (1 - output)); other activations fall back to 1
        if self.activation == sigmoid:
            gradient_mat = np.dot(self.output, (1 - self.output).T)
            gradient_activation = np.diag(np.diag(gradient_mat))
        else:
            gradient_activation = 1
        return gradient_activation

    def forward_propagation(self, xdata):
        self.xdata = xdata
        if self.is_input_layer:
            # the input layer passes the data through unchanged
            self.wx_plus_b = xdata
            self.output = xdata
            return xdata
        else:
            self.wx_plus_b = np.dot(self.weight, self.xdata) - self.bias
            self.output = self.activation(self.wx_plus_b)
            return self.output

    def back_propagation(self, gradient):
        gradient_activation = self.cal_gradient()  # (units x units) matrix
        gradient = np.asmatrix(np.dot(gradient.T, gradient_activation))

        self._gradient_weight = np.asmatrix(self.xdata)
        self._gradient_bias = -1
        self._gradient_x = self.weight

        self.gradient_weight = np.dot(gradient.T, self._gradient_weight.T)
        self.gradient_bias = gradient * self._gradient_bias
        self.gradient = np.dot(gradient, self._gradient_x).T

        # update along the negative gradient direction
        self.weight = self.weight - self.learn_rate * self.gradient_weight
        self.bias = self.bias - self.learn_rate * self.gradient_bias.T

        return self.gradient

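
# A minimal sanity check (an illustration, not part of the original commit):
# the diagonal Jacobian that DenseLayer.cal_gradient builds should match the
# elementwise sigmoid derivative sigmoid(x) * (1 - sigmoid(x)).
def _check_sigmoid_jacobian(units=4):
    out = np.asmatrix(sigmoid(np.random.randn(units, 1)))
    jacobian = np.diag(np.diag(np.dot(out, (1 - out).T)))  # as in cal_gradient
    elementwise = np.asarray(np.multiply(out, 1 - out)).ravel()
    assert np.allclose(np.diag(jacobian), elementwise)
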

class BPNN():
    '''
    Back-propagation neural network model
    '''
    def __init__(self):
        self.layers = []
        self.train_mse = []
        self.fig_loss = plt.figure()
        self.ax_loss = self.fig_loss.add_subplot(1, 1, 1)

    def add_layer(self, layer):
        self.layers.append(layer)

    def build(self):
        # the first layer is the input layer; every other layer allocates
        # weights sized by the units of the layer before it
        for i, layer in enumerate(self.layers):
            if i < 1:
                layer.is_input_layer = True
            else:
                layer.initializer(self.layers[i - 1].units)

    def summary(self):
        for i, layer in enumerate(self.layers):
            print('------- layer %d -------' % i)
            print('weight.shape ', np.shape(layer.weight))
            print('bias.shape ', np.shape(layer.bias))

    def train(self, xdata, ydata, train_round, accuracy):
        self.train_round = train_round
        self.accuracy = accuracy

        self.ax_loss.hlines(self.accuracy, 0, self.train_round * 1.1)

        x_shape = np.shape(xdata)
        for round_i in range(train_round):
            all_loss = 0
            for row in range(x_shape[0]):
                _xdata = np.asmatrix(xdata[row, :]).T
                _ydata = np.asmatrix(ydata[row, :]).T

                # forward propagation
                for layer in self.layers:
                    _xdata = layer.forward_propagation(_xdata)

                loss, gradient = self.cal_loss(_ydata, _xdata)
                all_loss = all_loss + loss

                # back propagation: the input layer is not updated
                for layer in self.layers[:0:-1]:
                    gradient = layer.back_propagation(gradient)

            mse = all_loss / x_shape[0]
            self.train_mse.append(mse)

            self.plot_loss()

            if mse < self.accuracy:
                print('---- reached target accuracy ----')
                return mse

    def cal_loss(self, ydata, ydata_):
        # squared-error loss and its gradient w.r.t. the prediction ydata_
        self.loss = np.sum(np.power((ydata - ydata_), 2))
        self.loss_gradient = 2 * (ydata_ - ydata)
        # vector (same shape as _ydata)
        return self.loss, self.loss_gradient

    def plot_loss(self):
        # redraw the running MSE curve; remove the old line via the artist
        # itself, which also works on newer matplotlib versions
        if self.ax_loss.lines:
            self.ax_loss.lines[0].remove()
        self.ax_loss.plot(self.train_mse, 'r-')
        plt.ion()
        plt.show()
        plt.pause(0.1)

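
# A numerical cross-check (an illustration, not part of the original commit):
# the analytic gradient 2 * (ydata_ - ydata) returned by BPNN.cal_loss should
# agree with a central finite difference of the squared-error loss.
def _check_loss_gradient(eps=1e-6):
    model = BPNN()
    y = np.asmatrix(np.random.rand(3, 1))
    y_ = np.asmatrix(np.random.rand(3, 1))
    _, grad = model.cal_loss(y, y_)
    for i in range(3):
        step = np.asmatrix(np.zeros((3, 1)))
        step[i, 0] = eps
        numeric = (model.cal_loss(y, y_ + step)[0]
                   - model.cal_loss(y, y_ - step)[0]) / (2 * eps)
        assert abs(numeric - grad[i, 0]) < 1e-4
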

def example():
    x = np.random.randn(10, 10)
    y = np.asarray([[0.8, 0.4], [0.4, 0.3], [0.34, 0.45], [0.67, 0.32],
                    [0.88, 0.67], [0.78, 0.77], [0.55, 0.66], [0.55, 0.43],
                    [0.54, 0.1], [0.1, 0.5]])

    model = BPNN()
    model.add_layer(DenseLayer(10))
    model.add_layer(DenseLayer(20))
    model.add_layer(DenseLayer(30))
    model.add_layer(DenseLayer(2))

    model.build()

    model.summary()

    model.train(xdata=x, ydata=y, train_round=100, accuracy=0.01)

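
# Hypothetical convenience helper (an assumption, not part of the original
# commit): reuse the trained layers for inference by running only the forward
# pass over each row of new input data.
def predict(model, xdata):
    outputs = []
    for row in range(np.shape(xdata)[0]):
        _x = np.asmatrix(xdata[row, :]).T
        for layer in model.layers:
            _x = layer.forward_propagation(_x)
        outputs.append(np.asarray(_x).ravel())
    return np.asarray(outputs)
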

if __name__ == '__main__':
    example()

Neural_Network/neuralnetwork_bp3.py

-152
This file was deleted.
