# neuralnetwork.py
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from .optimizer.gradientdescent import GradientDescent
from .layers.inputlayer import InputLayer
from .loss import MSE, MAE
from .layers.dense import Dense
def get_colorshape(arg):
    '''Given a layer class name, return the (color, marker) pair used to draw that layer type.'''
    switcher = {
        'InputLayer': ('black', 'o'),
        'Dense': ('blue', 'o'),
        'Conv1D': ('lightcoral', 's'),
        'Conv2D': ('red', 's'),
        'Conv3D': ('maroon', 's'),
        'Recurrent': ('limegreen', r'$\circlearrowleft$'),
        'LSTM': ('forestgreen', r'$\circlearrowleft$'),
        'GRU': ('darkgreen', r'$\circlearrowleft$'),
        'Transformer': ('mediumseagreen', r'$\circlearrowleft$'),
        'Flatten': ('dimgrey', r'$\|$'),
        'Reshape': ('dimgrey', r'$\|$'),
    }
    if arg not in switcher:
        raise Exception("Layer type not yet supported for display")
    return switcher[arg]
def legend_without_duplicate_labels(ax):
    '''Attach a legend that keeps only the first handle for each distinct label.'''
    handles, labels = ax.get_legend_handles_labels()
    unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]
    ax.legend(*zip(*unique), bbox_to_anchor=(0, 0.75))
class NeuralNetwork:
    '''Feed-forward neural network built by stacking layers on top of an input layer.'''

    def __init__(self, input_shape, loss=MAE()):
        self.layers = [InputLayer(input_shape)]
        self.loss = loss

    @property
    def output_layer(self):
        return self.layers[-1]

    def add(self, layer):
        '''Plug a new layer onto the current output layer and re-plug the loss onto it.'''
        layer.plug(self.output_layer)
        self.layers.append(layer)
        self.loss.plug(self.output_layer)

    def forward(self, X, y, weights=1):
        return self.loss.forward(X, y, weights=weights)

    def backprop(self, y, weights=1):
        delta = self.loss.backprop(y, weights=weights)
        delta_loss = np.copy(delta)
        for i in range(len(self.layers) - 1, 0, -1):
            delta = self.layers[i].backprop(delta)
        return delta_loss

    def get_layers_to_update(self):
        return self.layers[1:]

    def update(self, lr, noise_std=0):
        for layer in self.layers[1:]:
            layer.update(lr, noise_std=noise_std)

    def fit(self, X, y, optimizer=GradientDescent(), weights=1):
        # Note: the default GradientDescent() is created once at definition time
        # and is therefore shared between calls that rely on the default.
        optimizer.minimize(self, X, y, weights=weights)

    def predict(self, X):
        return self.output_layer.forward(X)

    def score(self, X, y):
        y_hat = self.predict(X)
        return self.loss.loss_function(y, y_hat)

    def get_list_layers_todisplay(self):
        return self.layers

    def display(self, print_connections=True):
        '''Draw the network: one column of markers per layer, optionally with connections.'''
        layers = self.get_list_layers_todisplay()
        fig, ax = plt.subplots()
        nu = 0
        for i in range(len(layers)):
            layer_type = layers[i].__class__.__name__
            color, mark = get_colorshape(layer_type)
            if layer_type in ['Flatten', 'Reshape']:  # shape-only layers are drawn as a single marker
                ax.scatter(i, 0, marker=mark, c=color, s=10000)
                continue
            old_nu = nu
            nu = layers[i].units
            ax.scatter(np.ones(nu) * i, np.linspace(int(-nu / 2), int(nu / 2), nu),
                       s=350, zorder=1, c=color, marker=mark, label=layer_type)
            # plot the connections back to the previously displayed layer
            if i != 0 and print_connections:
                for j in np.linspace(int(-old_nu / 2), int(old_nu / 2), old_nu):
                    for k in np.linspace(int(-nu / 2), int(nu / 2), nu):
                        ax.plot([last_displayed, i], [j, k], c='gray', zorder=-1)
            last_displayed = i
        legend_without_duplicate_labels(ax)
        plt.axis('off')
        plt.tight_layout()
        plt.show()

    def summary(self):
        '''Print a per-layer table of output shapes and trainable parameter counts.'''
        cols = ['Layer Type', 'Output Shape', 'Trainable Parameters']
        layers = self.get_list_layers_todisplay()
        summary = []
        params_total = 0
        for l in layers:
            summary.append([l.__class__.__name__, l.output_shape, l.nparams])
            params_total += l.nparams
        summary = pd.DataFrame(summary, columns=cols)
        print(summary.to_markdown(index=False))
        print("\nTotal trainable parameters :", params_total)