-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathminiflow.py
263 lines (222 loc) · 8.25 KB
/
miniflow.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
import numpy as np
class Layer:
    """
    Base class for layers in the network.

    Arguments:
        `inbound_layers`: A list of layers with edges into this layer.
    """
    def __init__(self, inbound_layers=None):
        """
        Layer's constructor (runs when the object is instantiated). Sets
        properties that all layers need.
        """
        # Use None as the default instead of a mutable [] default:
        # a shared list default would be reused across every Layer
        # instantiated without arguments.
        if inbound_layers is None:
            inbound_layers = []
        # A list of layers with edges into this layer.
        self.inbound_layers = inbound_layers
        # The eventual value of this layer. Set by running
        # the forward() method.
        self.value = None
        # A list of layers that this layer outputs to.
        self.outbound_layers = []
        # Keys are the inputs to this layer and their values are the
        # partials of this layer with respect to that input.
        self.gradients = {}
        # Sets this layer as an outbound layer for all of
        # this layer's inputs.
        for layer in inbound_layers:
            layer.outbound_layers.append(self)

    def forward(self):
        """
        Every layer that uses this class as a base class will
        need to define its own `forward` method.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        # NOTE: the original signature was `forward()` without `self`,
        # which raised TypeError instead of NotImplementedError when
        # called on an instance.
        raise NotImplementedError

    def backward(self):
        """
        Every layer that uses this class as a base class will
        need to define its own `backward` method.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class Input(Layer):
    """
    A generic input into the network.
    """
    def __init__(self):
        # An Input has no inbound layers; run the base constructor
        # with its defaults. The most important property here is
        # `value`, which is assigned externally during
        # `topological_sort`.
        Layer.__init__(self)

    def forward(self):
        # The value is supplied from outside, so there is nothing
        # to compute here.
        pass

    def backward(self):
        # An Input layer has no inputs, so the gradient (derivative)
        # starts at zero. The key, `self`, is a reference to this object.
        self.gradients = {self: 0}
        # Weights and bias may be inputs, so accumulate the partials
        # that every consumer computed with respect to this layer.
        for consumer in self.outbound_layers:
            self.gradients[self] = self.gradients[self] + consumer.gradients[self]
class Linear(Layer):
    """
    Represents a layer that performs a linear transform.
    """
    def __init__(self, X, W, b):
        # Inputs, weights and bias are all treated as inbound layers
        # by the base class (Layer) constructor.
        Layer.__init__(self, [X, W, b])

    def forward(self):
        """
        Performs the math behind a linear transform: X . W + b.
        """
        inputs, weights, bias = self.inbound_layers
        self.value = np.dot(inputs.value, weights.value) + bias.value

    def backward(self):
        """
        Calculates the gradient based on the output values.
        """
        x_layer, w_layer, b_layer = self.inbound_layers
        # Start every inbound partial at zero.
        self.gradients = {layer: np.zeros_like(layer.value)
                          for layer in self.inbound_layers}
        # The gradient depends on every consumer of this layer, so
        # sum the contributions over all outputs.
        for output in self.outbound_layers:
            # Partial of the cost with respect to this layer.
            grad_cost = output.gradients[self]
            # Partial of the loss with respect to the inputs: dC/dOut . W^T
            self.gradients[x_layer] += np.dot(grad_cost, w_layer.value.T)
            # Partial of the loss with respect to the weights: X^T . dC/dOut
            self.gradients[w_layer] += np.dot(x_layer.value.T, grad_cost)
            # Partial of the loss with respect to the bias: summed over
            # the batch dimension.
            self.gradients[b_layer] += np.sum(grad_cost, axis=0, keepdims=False)
class Sigmoid(Layer):
    """
    Represents a layer that performs the sigmoid activation function.
    """
    def __init__(self, layer):
        # The base class constructor; a single inbound layer.
        Layer.__init__(self, [layer])

    def _sigmoid(self, x):
        """
        Kept separate from `forward` because `backward` reuses it
        conceptually (via the cached output).

        `x`: A numpy array-like object.
        """
        return 1. / (1. + np.exp(-x))

    def forward(self):
        """
        Perform the sigmoid function and set the value.
        """
        self.value = self._sigmoid(self.inbound_layers[0].value)

    def backward(self):
        """
        Calculates the gradient using the derivative of
        the sigmoid function: s * (1 - s).
        """
        source = self.inbound_layers[0]
        # Initialize the gradient to 0.
        self.gradients = {source: np.zeros_like(source.value)}
        # Sum the partial with respect to the input over all the outputs.
        for output in self.outbound_layers:
            grad_cost = output.gradients[self]
            s = self.value
            self.gradients[source] += s * (1 - s) * grad_cost
class MSE(Layer):
    def __init__(self, y, a):
        """
        The mean squared error cost function.
        Should be used as the last layer for a network.
        """
        # Call the base class' constructor: labels and activations
        # are the two inbound layers.
        Layer.__init__(self, [y, a])

    def forward(self):
        """
        Calculates the mean squared error.
        """
        # Reshape both operands to column vectors so the subtraction
        # is elementwise. Without this, subtracting a (3,) array from
        # a (3,1) array would broadcast to a (3,3) result instead of
        # the intended (3,1).
        labels = self.inbound_layers[0].value.reshape(-1, 1)
        predictions = self.inbound_layers[1].value.reshape(-1, 1)
        # Number of examples; reused by backward().
        self.m = self.inbound_layers[0].value.shape[0]
        # Cache the residuals for backward().
        self.diff = labels - predictions
        self.value = np.mean(self.diff ** 2)

    def backward(self):
        """
        Calculates the gradient of the cost with respect to the
        labels and the activations.
        """
        y_layer, a_layer = self.inbound_layers
        self.gradients[y_layer] = (2 / self.m) * self.diff
        self.gradients[a_layer] = (-2 / self.m) * self.diff
def topological_sort(feed_dict):
    """
    Sort the layers in topological order using Kahn's Algorithm.

    `feed_dict`: A dictionary where the key is a `Input` Layer and the
    value is the respective value feed to that Layer.

    Returns a list of sorted layers.
    """
    input_layers = list(feed_dict.keys())

    # Phase 1: breadth-first walk from the inputs to collect the
    # in/out edge sets of every reachable layer.
    edges = {}
    frontier = list(input_layers)
    while frontier:
        layer = frontier.pop(0)
        if layer not in edges:
            edges[layer] = {'in': set(), 'out': set()}
        for successor in layer.outbound_layers:
            if successor not in edges:
                edges[successor] = {'in': set(), 'out': set()}
            edges[layer]['out'].add(successor)
            edges[successor]['in'].add(layer)
            frontier.append(successor)

    # Phase 2: repeatedly emit a layer with no remaining incoming
    # edges, removing its outgoing edges as we go.
    ordering = []
    ready = set(input_layers)
    while ready:
        layer = ready.pop()
        if isinstance(layer, Input):
            # Inputs get their value from the feed_dict here.
            layer.value = feed_dict[layer]
        ordering.append(layer)
        for successor in layer.outbound_layers:
            edges[layer]['out'].remove(successor)
            edges[successor]['in'].remove(layer)
            # If no other incoming edges remain, the successor is ready.
            if len(edges[successor]['in']) == 0:
                ready.add(successor)
    return ordering
def forward_and_backward(graph):
    """
    Performs a forward pass and a backward pass through a list of
    sorted Layers.

    Arguments:
        `graph`: The result of calling `topological_sort`.
    """
    # Forward pass: evaluate each layer in topological order.
    for layer in graph:
        layer.forward()
    # Backward pass: propagate gradients in reverse topological order.
    for layer in reversed(graph):
        layer.backward()
def sgd_update(trainables, learning_rate=1e-2):
    """
    Updates the value of each trainable with SGD.

    Arguments:
        `trainables`: A list of `Input` Layers representing weights/biases.
        `learning_rate`: The learning rate.
    """
    for trainable in trainables:
        # Step against the partial of the cost with respect to this
        # trainable, scaled by the learning rate.
        trainable.value -= learning_rate * trainable.gradients[trainable]