
Commit a55fa32

committed
feat(ml): add deep learning implementations dir
- from my deep-learning-map repo
1 parent 48dcb1f commit a55fa32

20 files changed, +1461 -0 lines changed

algorithms/README.md

Lines changed: 2 additions & 0 deletions
@@ -1,5 +1,7 @@
 # Algorithms
 
+Algorithms implemented in Python. Implementations of machine learning algorithms can be found in the directory `machine-learning`.
+
 Algorithms implemented:
 - Breadth-first search `bfs.py`
 - Depth-first search `dfs.py`
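For reference, a minimal breadth-first search sketch (hypothetical; not taken from the repo's `bfs.py`):

from collections import deque

def bfs(graph, start):
    """Return the nodes of an adjacency-list graph in breadth-first order from `start`."""
    visited = {start}
    order = []
    queue = deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbour in graph.get(node, []):
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)
    return order

# Example: bfs({"a": ["b", "c"], "b": ["d"], "c": [], "d": []}, "a") == ["a", "b", "c", "d"]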
Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
# Implementations

A collection of implementations of networks, layers and evaluation metrics in Python.

TODO:
- Multi-layer Perceptron
- CNN
- RNN
- LSTM
- Backpropagation
- RL agent
- (GAN)
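For the "evaluation metrics" item, a minimal classification-accuracy sketch (hypothetical; not part of this commit):

import numpy as np

def accuracy(pred, true):
    """Fraction of predicted integer labels that match the true labels."""
    pred = np.asarray(pred)
    true = np.asarray(true)
    return float(np.mean(pred == true))

# Example: accuracy([0, 1, 2, 2], [0, 1, 1, 2]) == 0.75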
Lines changed: 139 additions & 0 deletions
@@ -0,0 +1,139 @@
"""
Adapted from Udacity's MiniFlow.
"""
import numpy as np


class Node:
    def __init__(self, inbound_nodes=None):
        # Avoid a mutable default argument; treat None as "no inbound nodes".
        self.inbound_nodes = inbound_nodes if inbound_nodes is not None else []
        self.value = None
        self.outbound_nodes = []
        self.gradients = {}
        for node in self.inbound_nodes:
            node.outbound_nodes.append(self)

    def forward(self):
        """
        Every node that uses this class as a base class will
        need to define its own 'forward' method.
        """
        raise NotImplementedError

    def backward(self):
        """
        Every node that uses this class as a base class will
        need to define its own 'backward' method.
        """
        raise NotImplementedError
class Input(Node):
    """An input to the network."""
    def __init__(self):
        Node.__init__(self)

    def forward(self):
        # Do nothing because nothing is calculated
        pass

    def backward(self):
        # An Input node has no inbound nodes, so it computes no gradient of its own:
        # start at zero and accumulate the gradients flowing back from every consumer.
        self.gradients = {self: 0}
        # Sum gradient from output nodes
        for n in self.outbound_nodes:
            self.gradients[self] += n.gradients[self]
class Linear(Node):
    """Node that performs a linear transform."""
    def __init__(self, X, W, b):
        Node.__init__(self, [X, W, b])

    def forward(self):
        X = self.inbound_nodes[0].value
        W = self.inbound_nodes[1].value
        b = self.inbound_nodes[2].value

        # output: each column is a unit, each row is an example
        self.value = np.dot(X, W) + b

    def backward(self):
        # init partial derivative for each inbound node
        self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}
        # sum gradients over all outputs
        for n in self.outbound_nodes:
            # dCost/dthis, shape (n_examples, n_units)
            grad_cost = n.gradients[self]

            # X: multiply grad_cost by W so the result keeps X's shape,
            #    (n_examples, n_units) . (n_units, n_inputs) -> (n_examples, n_inputs)
            self.gradients[self.inbound_nodes[0]] += np.dot(grad_cost, self.inbound_nodes[1].value.T)
            # W: (n_inputs, n_examples) . (n_examples, n_units) -> (n_inputs, n_units)
            self.gradients[self.inbound_nodes[1]] += np.dot(self.inbound_nodes[0].value.T, grad_cost)
            # b: sum each column, i.e. sum across examples
            self.gradients[self.inbound_nodes[2]] += np.sum(grad_cost, axis=0, keepdims=False)
class Sigmoid(Node):
    """Node that performs the sigmoid activation function."""
    def __init__(self, node):
        Node.__init__(self, [node])

    def _sigmoid(self, x):
        """
        :param x: numpy array-like object
        :return: elementwise sigmoid of x
        """
        return 1. / (1. + np.exp(-x))

    def forward(self):
        input_value = self.inbound_nodes[0].value
        self.value = self._sigmoid(input_value)

    def backward(self):
        self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}
        # sum gradients over all outputs, using sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
        for n in self.outbound_nodes:
            grad_cost = n.gradients[self]
            sigmoid = self.value
            self.gradients[self.inbound_nodes[0]] += grad_cost * sigmoid * (1 - sigmoid)
class Layer:
    def __init__(self, units=0, W=None, activation="relu", n_inputs=0):
        if W is not None:
            # Dimensions are inferred from the given weight matrix.
            self.units = len(W)
            self.n_inputs = len(W[0])
            self.W = np.asarray(W)
        else:
            # TODO: make this more robust
            self.n_inputs = n_inputs
            self.units = units
            # n_inputs + 1: presumably an extra column for a bias term
            self.W = np.random.randn(self.units, self.n_inputs + 1)
        self.activation = activation

    def forward_pass(self, x):
        # Cache the input for the backward pass.
        self.x = np.asarray(x)
        prod = np.matmul(self.W, self.x)
        return relu(prod)

    def backward_pass(self, grad):
        # Assumes grad and self.x are column vectors of matching shapes.
        self.dW = grad.dot(self.x.T)
        self.dx = self.W.T.dot(grad)


def relu(array):
    return [max(0, x) for x in array]


W = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
x = [1, 1, -1]

h1 = Layer(W=W)
print(h1.forward_pass(x))
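A minimal usage sketch for the Node classes above (not from the commit): wire Input -> Linear -> Sigmoid by hand, run forward in topological order, then seed the gradient a loss node would normally supply and run backward in reverse order.

# Usage sketch (assumes the classes defined in this file).
X_in, W_in, b_in = Input(), Input(), Input()
linear = Linear(X_in, W_in, b_in)
sigmoid = Sigmoid(linear)

X_in.value = np.array([[1., 2.]])        # 1 example, 2 features
W_in.value = np.array([[0.5], [-0.5]])   # 2 inputs -> 1 unit
b_in.value = np.array([0.1])

for node in (X_in, W_in, b_in, linear, sigmoid):
    node.forward()

# Stand-in for the gradient a loss node would pass back to the Linear node.
sigmoid.gradients = {linear: np.ones_like(linear.value)}
for node in (linear, b_in, W_in, X_in):
    node.backward()

print(sigmoid.value)           # forward output
print(X_in.gradients[X_in])    # gradient w.r.t. the input under the seeded gradient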
Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
import numpy as np
import matplotlib.pyplot as plt


def relu(X):
    # Elementwise ReLU over a batch; returns an array so downstream numpy ops work.
    return np.array([[max(0, x) for x in example] for example in X])


def softmax(X):
    # TODO: optimise (e.g. subtract each row's max before exponentiating, for numerical stability)
    probs = []
    for example in X:
        exps = [np.exp(x) for x in example]
        den = sum(exps)
        probs.append([e / den for e in exps])
    return np.array(probs)


def cross_entropy_loss(pred, true):
    if len(pred) != len(true):
        # TODO: rewrite exception
        raise Exception("Number of predictions different from number of labelled examples.")
    n_examples = len(pred)
    mean_loss = 0
    for i in range(n_examples):
        yh = pred[i]        # predicted probability vector for example i
        yt = int(true[i])   # integer class label for example i
        loss = -np.log(yh[yt])
        mean_loss += loss / n_examples
    return mean_loss
# Generate data
np.random.seed(42)
num_examples = 20
input_length = 3
int_range = 10
X = np.random.randint(int_range, size=[num_examples, input_length])
y = [x[0] + x[2]**2 for x in X]
# Bucket the targets into the 4 classes 0-3
y = [int(min(max(np.ceil(el / 25), 1), 4)) - 1 for el in y]
print(X[:5])
print(y[:5])
# X, y = sklearn.datasets.make_moons(200, noise=0.20)
# plt.scatter(X[:,0], X[:,1], s=40, c=y)

# Set up an MLP with one hidden layer of 5 units (a second hidden layer is sketched below but commented out)
h1_units = 5
h2_units = 5
output_classes = 4

# Initialise weights
stdev = 0.001
W1 = np.random.randn(input_length, h1_units) * stdev
b1 = np.random.randn(h1_units) * stdev
# W2 = np.random.randn(h1_units, h2_units) * stdev
# b2 = np.random.randn(h2_units) * stdev
Wo = np.random.randn(h1_units, output_classes) * stdev
bo = np.random.randn(output_classes) * stdev

# Forward prop
h1_mul = np.dot(X, W1) + b1
print("h1_mul shape: ", h1_mul.shape)
# print("h1: ", h1)
h1 = relu(h1_mul)
# h2 = np.dot(h1, W2) + b2
# print("h2: ", h2)
# h2 = relu(h2)
output = np.dot(h1, Wo) + bo
pred = softmax(output)
# print("output: ", output)
print(cross_entropy_loss(pred, y))
# Backward prop
dpred = sum([-yt * 1.0 / yh for yt, yh in zip(np.ravel(y), pred.ravel())])
# sum_exps = sum([np.exp(sk) for sk in ...])
# doutput expr: (sum_exps - np.exp(sj))*np.exp(sj)/(sum_exps**2)
doutput = None  # TODO: calculate, need to diff softmax
# NOTE: the lines below are placeholders that assume doutput has been computed;
# see the sketch after this file for the combined softmax + cross-entropy gradient.
dWo = doutput * h1
dbo = doutput
dh1 = doutput * Wo
dh1_mul = dh1 * np.int64(h1 > 0)
db1 = dh1_mul
dW1 = X * dh1_mul
dX = W1 * dh1_mul
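The TODO above stops at differentiating the softmax. Not part of the commit, but for reference: the standard shortcut is to differentiate softmax and cross-entropy together, which gives dL/d(output) = pred - onehot(y), averaged over the batch. A hedged sketch reusing the variable names from the script, assuming integer labels in y:

# Sketch only: standard backward pass for mean cross-entropy over a softmax,
# using the forward quantities (X, W1, Wo, h1_mul, h1, pred) defined above.
N = len(y)
y_idx = np.array(y, dtype=int)

doutput = np.array(pred)                  # (N, output_classes)
doutput[np.arange(N), y_idx] -= 1.0       # softmax + cross-entropy gradient: pred - onehot(y)
doutput /= N                              # mean over the batch

dWo = np.dot(np.array(h1).T, doutput)     # (h1_units, output_classes)
dbo = doutput.sum(axis=0)
dh1 = np.dot(doutput, Wo.T)               # (N, h1_units)
dh1_mul = dh1 * (h1_mul > 0)              # ReLU gate
dW1 = np.dot(X.T, dh1_mul)                # (input_length, h1_units)
db1 = dh1_mul.sum(axis=0)
dX = np.dot(dh1_mul, W1.T)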
