pyerualjetwork 1.3.1__py3-none-any.whl → 1.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/__init__.py +1 -1
- plan/plan.py +467 -423
- pyerualjetwork-1.3.2.dist-info/METADATA +8 -0
- pyerualjetwork-1.3.2.dist-info/RECORD +6 -0
- pyerualjetwork-1.3.1.dist-info/METADATA +0 -8
- pyerualjetwork-1.3.1.dist-info/RECORD +0 -6
- {pyerualjetwork-1.3.1.dist-info → pyerualjetwork-1.3.2.dist-info}/WHEEL +0 -0
- {pyerualjetwork-1.3.1.dist-info → pyerualjetwork-1.3.2.dist-info}/top_level.txt +0 -0
plan/plan.py
CHANGED
@@ -9,64 +9,64 @@ import seaborn as sns


# BUILD -----
def TrainPLAN(
+    x_train: List[Union[int, float]],
+    y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
+    class_count: int,
+    layers: List[str],
+    neurons: List[Union[int, float]],
+    membran_thresholds: List[str],
+    membran_potentials: List[Union[int, float]],
+    normalizations: List[str],
+    activations: List[str],
+    visualize: str
) -> str:

    infoPLAN = """
    Creates and configures a PLAN model.

    Args:
+        x_train (list[num]): List of input data.
+        y_train (list[num]): List of y_train. (one hot encoded)
+        class_count (int): Number of classes.
+        layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
+        neurons (list[num]): List of neuron counts for each layer.
+        membran_thresholds (list[str]): List of membran_thresholds.
+        membran_potentials (list[num]): List of membran_potentials.
+        normalizations (List[str]): Whether normalization will be performed at indexed layers ("y" or "n").
+        activations (list[str]): List of activation functions.
+        visualize (str): visualize Training procces or not visualize ('y' or 'n')

    Returns:
+        list([num]): (Weight matrices list, TrainPredictions list, Trainacc).
        error handled ?: Process status ('e')
    """

+    if visualize != 'y' and visualize != 'n':
+        print(Fore.RED + "ERROR109: visualize parameter must be 'y' or 'n'. TrainPLAN",infoPLAN)
        return 'e'


+    LastNeuron = neurons[-1:][0]
+    if LastNeuron != class_count:
        print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
        return 'e'

+    if len(normalizations) != len(membran_potentials):

+        print(Fore.RED + "ERROR307: Normalization list length must be equal to length of membran_thresholds List,membran_potentials List,layers List,neurons List. from: TrainPLAN",infoPLAN)
        return 'e'

+    if len(x_train) != len(y_train):
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length.",infoPLAN)
        return 'e'

+    for i, Value in enumerate(membran_potentials):

+        if normalizations[i] != 'y' and normalizations[i] != 'n':
            print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
            return 'e'

+        if membran_thresholds[i] == 'none':
            print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
            time.sleep(3)

@@ -74,140 +74,140 @@ def TrainPLAN(
            print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
            return 'e'

+        if isinstance(neurons[i], str):
+            print(Fore.RED + "ERROR202: neurons list must be numeric.")
            return 'e'

+    if len(membran_thresholds) != len(membran_potentials):
        print(Fore.RED + "ERROR302: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
        return 'e'

+    if len(layers) != len(neurons):
+        print(Fore.RED + "ERROR303: layers list and neurons list must same length. from: TrainPLAN",infoPLAN)
        return 'e'

+    if len(membran_potentials) != len(layers) or len(membran_thresholds) != len(layers):
+        print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same layers list length. from: TrainPLAN",infoPLAN)
        return 'e'


+    for Activation in activations:
        if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
+            print(Fore.RED + "ERROR108: activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
            return 'e'


+    for index, Neuron in enumerate(neurons):
        if Neuron < 1:
+            print(Fore.RED + "ERROR101: neurons list must be positive non zero integer. from: TrainPLAN",infoPLAN)
            return 'e'

+        if index + 1 != len(neurons) and Neuron % 2 != 0:
            print(Fore.MAGENTA + "WARNING101: We strongly advise to do Neuron counts be should even numbers. from: TrainPLAN",infoPLAN)
            time.sleep(3)

+        if Neuron < class_count:
            print(Fore.RED + "ERROR102: Neuron count must be greater than class count(For PLAN). from: TrainPLAN")
            return 'e'

+        if layers[index] != 'fex' and layers[index] != 'cat':
+            print(Fore.RED + "ERROR107: layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
            return 'e'

+    if len(membran_thresholds) != len(membran_potentials):
        print(Fore.RED + "ERROR305: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
        return 'e'


+    for i, Sign in enumerate(membran_thresholds):
        if Sign != '>' and Sign != '<' and Sign != '==' and Sign != '!=' and Sign != 'none':
            print(Fore.RED + "ERROR104: MEMBRAN THRESHOLDS must be '>' or '<' or '==' or '!='. or 'none' WE SUGGEST '<' FOR FEX LAYER AND '==' FOR CAT LAYER (Your data, your hyperparameter) from: TrainPLAN",infoPLAN)
            return 'e'

+        if layers[i] == 'fex' and Sign == 'none':
            print(Fore.RED + "ERROR109: at layer type 'fex', pairing with 'none' Threshold is not acceptlable. if you want to 'none' put '==' and make threshold value '0'. from: TrainPLAN ",infoPLAN)
            return 'e'

+    Uniquey_train = set()
+    for sublist in y_train:

+        Uniquey_train.add(tuple(sublist))


+    Uniquey_train = list(Uniquey_train)

+    y_train = [tuple(sublist) for sublist in y_train]


+    if len(Uniquey_train) != class_count:
        print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
        return 'e'

+    x_train[0] = np.array(x_train[0])
+    x_train[0] = x_train[0].ravel()
+    x_train_size = len(x_train[0])

+    W = WeightIdentification(len(layers) - 1,class_count,neurons,x_train_size)
+    Divides = SynapticDividing(class_count,W)
    TrainedWs = [1] * len(W)
    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
+    TrainPredictions = [None] * len(y_train)
    true = 0
+    start_time = time.time()
+    for index, inp in enumerate(x_train):
+        uni_start_time = time.time()
        inp = np.array(inp)
        inp = inp.ravel()

+        if x_train_size != len(inp):
+            print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from: TrainPLAN",infoPLAN + Style.RESET_ALL)
            return 'e'


+        for Ulindex, Ul in enumerate(Uniquey_train):

+            if Ul == y_train[index]:
                for Windex, w in enumerate(W):
                    for i, ul in enumerate(Ul):
                        if ul == 1.0:
                            k = i
+                    cs = Divides[int(k)][Windex][0]

+                    W[Windex] = SynapticPruning(w, cs, 'row', int(k),class_count)

+        neural_layer = inp

+        for Lindex, Layer in enumerate(layers):

+            if normalizations[Lindex] == 'y':
+                neural_layer = Normalization(neural_layer)

+            if activations[Lindex] == 'relu':
+                neural_layer = Relu(neural_layer)
+            elif activations[Lindex] == 'sigmoid':
+                neural_layer = Sigmoid(neural_layer)
+            elif activations[Lindex] == 'softmax':
+                neural_layer = Softmax(neural_layer)

            if Layer == 'fex':
+                neural_layer,W[Lindex] = Fex(neural_layer, W[Lindex], membran_thresholds[Lindex], membran_potentials[Lindex])
            elif Layer == 'cat':
+                neural_layer,W[Lindex] = Cat(neural_layer, W[Lindex], membran_thresholds[Lindex], membran_potentials[Lindex],1)

+        RealOutput = np.argmax(y_train[index])
+        PredictedOutput = np.argmax(neural_layer)
        if RealOutput == PredictedOutput:
            true += 1
+        acc = true / len(y_train)
        TrainPredictions[index] = PredictedOutput

+        if visualize == 'y':

+            y_trainVisual = np.copy(y_train)
+            y_trainVisual = np.argmax(y_trainVisual, axis=1)

            plt.figure(figsize=(12, 6))
+            sns.kdeplot(y_trainVisual, label='Real Outputs', fill=True)
            sns.kdeplot(TrainPredictions, label='Predictions', fill=True)
            plt.legend()
            plt.xlabel('Class')
@@ -215,7 +215,7 @@ def TrainPLAN(
            plt.title('Predictions and Real Outputs for Training KDE Plot')
            plt.show()

+            if index + 1 != len(x_train):

                plt.close('all')

@@ -228,91 +228,91 @@ def TrainPLAN(
            TrainedWs[i] = TrainedWs[i] + w


+        W = WeightIdentification(len(layers) - 1,class_count,neurons,x_train_size)


+        uni_end_time = time.time()

+        calculating_est = round((uni_end_time - uni_start_time) * (len(x_train) - index),3)

+        if calculating_est < 60:
+            print('\rest......(sec):',calculating_est,'\n',end= "")
+            print('\rTrain accuracy: ' ,acc ,"\n", end="")

+        elif calculating_est > 60 and calculating_est < 3600:
+            print('\rest......(min):',calculating_est/60,'\n',end= "")
+            print('\rTrain accuracy: ' ,acc ,"\n", end="")

+        elif calculating_est > 3600:
+            print('\rest......(h):',calculating_est/3600,'\n',end= "")
+            print('\rTrain accuracy: ' ,acc ,"\n", end="")

    EndTime = time.time()

+    calculating_est = round(EndTime - start_time,2)

    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")

+    if calculating_est < 60:
+        print('Total training time(sec): ',calculating_est)

+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total training time(min): ',calculating_est/60)

+    elif calculating_est > 3600:
+        print('Total training time(h): ',calculating_est/3600)

+    if acc > 0.8:
+        print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)

+    elif acc < 0.8 and acc > 0.6:
+        print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)

+    elif acc < 0.6:
+        print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)



+    return TrainedWs,TrainPredictions,acc

# FUNCTIONS -----

def WeightIdentification(
+    layer_count,   # int: Number of layers in the neural network.
+    class_count,   # int: Number of classes in the classification task.
+    neurons,       # list[num]: List of neuron counts for each layer.
+    x_train_size   # int: Size of the input data.
) -> str:
    """
    Identifies the weights for a neural network model.

    Args:
+        layer_count (int): Number of layers in the neural network.
+        class_count (int): Number of classes in the classification task.
+        neurons (list[num]): List of neuron counts for each layer.
+        x_train_size (int): Size of the input data.

    Returns:
        list([numpy_arrays],[...]): Weight matices of the model. .
    """


+    Wlen = layer_count + 1
    W = [None] * Wlen
+    W[0] = np.ones((neurons[0],x_train_size))
+    ws = layer_count - 1
    for w in range(ws):
+        W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
+    W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
    return W

def SynapticPruning(
    w,            # list[list[num]]: Weight matrix of the neural network.
+    cs,           # list[list[num]]: Synaptic connections between neurons.
+    key,          # int: key for identifying synaptic connections.
    Class,        # int: Class label for the current training instance.
+    class_count   # int: Total number of classes in the dataset.

) -> str:
    infoPruning = """
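The hunks above complete the snake_case rename of TrainPLAN's public interface. As orientation, here is a minimal usage sketch against the 1.3.2 signature; the import path, toy data, and every hyperparameter value are illustrative assumptions, not something this diff ships.

    from plan import plan
    # Two flattened samples, two one-hot classes (hypothetical toy data).
    x_train = [[0.1, 0.9, 0.2, 0.8], [0.7, 0.3, 0.6, 0.4]]
    y_train = [[1, 0], [0, 1]]
    W, TrainPredictions, acc = plan.TrainPLAN(
        x_train, y_train,
        class_count=2,
        layers=['fex', 'cat'],
        neurons=[4, 2],                   # last entry must equal class_count (ERROR108)
        membran_thresholds=['<', '=='],   # signs suggested by the ERROR104 message
        membran_potentials=[0.5, 0],
        normalizations=['y', 'n'],
        activations=['none', 'none'],
        visualize='n')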
@@ -320,10 +320,10 @@ def SynapticPruning(

    Args:
        w (list[list[num]]): Weight matrix of the neural network.
+        cs (list[list[num]]): Synaptic connections between neurons.
+        key (str): key for identifying synaptic row or col connections.
        Class (int): Class label for the current training instance.
+        class_count (int): Total number of classes in the dataset.

    Returns:
        numpy array: Weight matrix.
@@ -332,55 +332,55 @@ def SynapticPruning(

    Class += 1 # because index start 0

+    if Class != class_count and Class != 1:

+        ce = cs / Class

+        w[int(ce)-1::-1,:] = 0

+        w[cs:,:] = 0


    else:

        if Class == 1:
+            if key == 'row':

+                w[cs:,:] = 0

+            elif key == 'col':

+                w[:,cs] = 0

            else:
+                print(Fore.RED + "ERROR103: SynapticPruning func's key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning)
                return 'e'
        else:
+            if key == 'row':

+                w[cs:,:] = 0

+                ce = int(round(w.shape[0] - cs / class_count))
+                w[ce-1::-1,:] = 0

+            elif key == 'col':

+                w[:,cs] = 0

            else:
+                print(Fore.RED + "ERROR103: SynapticPruning func's key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning + Style.RESET_ALL)
                return 'e'
    return w

def SynapticDividing(
+    class_count,  # int: Total number of classes in the dataset.
    W             # list[list[num]]: Weight matrix of the neural network.
) -> str:
    """
    Divides the synaptic weights of a neural network model based on class count.

    Args:
+        class_count (int): Total number of classes in the dataset.
        W (list[list[num]]): Weight matrix of the neural network.

    Returns:
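What the 'row' branches above actually do is easiest to see in isolation. A standalone NumPy sketch with an invented matrix and cut point (not package code):

    import numpy as np
    w = np.ones((6, 4))   # 6 neuron rows x 4 input columns
    cs = 4                # hypothetical cut start from SynapticDividing
    w[cs:, :] = 0         # prune rows 4..5; only the first 4 rows stay active
    print(w.sum(axis=1))  # -> [4. 4. 4. 4. 0. 0.]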
@@ -392,25 +392,25 @@ def SynapticDividing(
    #print('Piece:' + Piece)
    #input()
    # Creating an empty three-dimensional list
+    Divides = [[[0] for _ in range(len(W))] for _ in range(class_count)]

    for i in range(len(W)):


+        Piece[i] = int(math.floor(W[i].shape[0] / class_count))

+    cs = 0
    # j = Classes, i = Weights, [0] = CutStart.

    for i in range(len(W)):
+        for j in range(class_count):
+            cs = cs + Piece[i]
+            Divides[j][i][0] = cs
            #print('Divides: ' + j + i + ' = ' + Divides[j][i][0])
            #input()

    j = 0
+    cs = 0

    return Divides

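The cut points SynapticDividing accumulates come from an integer split of each weight matrix's rows across classes. A one-off sketch of the Piece arithmetic, with invented values:

    import math
    class_count = 3
    rows = 9                                      # rows of one weight matrix W[i]
    piece = int(math.floor(rows / class_count))   # 3 rows reserved per class
    # cut starts then accumulate piece by piece: 3, 6, 9 for classes 0, 1, 2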
@@ -418,8 +418,8 @@ def SynapticDividing(
def Fex(
    Input,               # list[num]: Input data.
    w,                   # list[list[num]]: Weight matrix of the neural network.
+    membran_threshold,   # str: Sign for threshold comparison ('<', '>', '==', '!=').
+    membran_potential    # num: Threshold value for comparison.
) -> tuple:
    """
    Applies feature extraction process to the input data using synaptic pruning.
@@ -427,32 +427,32 @@ def Fex(
    Args:
        Input (list[num]): Input data.
        w (list[list[num]]): Weight matrix of the neural network.
+        membran_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+        membran_potential (num): Threshold value for comparison.

    Returns:
        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
    """

+    if membran_threshold == '<':
+        PruneIndex = np.where(Input < membran_potential)
+    elif membran_threshold == '>':
+        PruneIndex = np.where(Input > membran_potential)
+    elif membran_threshold == '==':
+        PruneIndex = np.where(Input == membran_potential)
+    elif membran_threshold == '!=':
+        PruneIndex = np.where(Input != membran_potential)

    w = SynapticPruning(w, PruneIndex, 'col', 0, 0)

+    neural_layer = np.dot(w, Input)
+    return neural_layer,w

def Cat(
    Input,               # list[num]: Input data.
    w,                   # list[list[num]]: Weight matrix of the neural network.
+    membran_threshold,   # str: Sign for threshold comparison ('<', '>', '==', '!=').
+    membran_potential,   # num: Threshold value for comparison.
    isTrain              # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
) -> tuple:
    """
@@ -461,29 +461,29 @@ def Cat(
    Args:
        Input (list[num]): Input data.
        w (list[list[num]]): Weight matrix of the neural network.
+        membran_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+        membran_potential (num): Threshold value for comparison.
        isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).

    Returns:
        tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
    """

+    if membran_threshold == '<':
+        PruneIndex = np.where(Input < membran_potential)
+    elif membran_threshold == '>':
+        PruneIndex = np.where(Input > membran_potential)
+    elif membran_threshold == '==':
+        PruneIndex = np.where(Input == membran_potential)
+    elif membran_threshold == '!=':
+        PruneIndex = np.where(Input != membran_potential)
+    if isTrain == 1 and membran_threshold != 'none':

        w = SynapticPruning(w, PruneIndex, 'col', 0, 0)


+    neural_layer = np.dot(w, Input)
+    return neural_layer,w


def Normalization(
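The comparison chain shared by Fex and Cat reduces to a np.where mask over the input vector. A standalone sketch with made-up numbers, showing what PruneIndex selects for the '<' sign the package recommends for 'fex' layers:

    import numpy as np
    membran_potential = 0.5
    Input = np.array([0.2, 0.7, 0.4, 0.9])
    PruneIndex = np.where(Input < membran_potential)
    print(PruneIndex[0])  # -> [0 2]; those columns are zeroed by SynapticPruning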
@@ -558,27 +558,27 @@ def Relu(


def TestPLAN(
+    x_test,               # list[list[num]]: Test input data.
+    y_test,               # list[num]: Test labels.
+    layers,               # list[str]: List of layer names.
+    membran_thresholds,   # list[str]: List of MEMBRAN THRESHOLDS for each layer.
+    membran_potentials,   # list[num]: List of MEMBRAN POTENTIALS for each layer.
+    normalizations,       # str: Whether normalization will be performed ("y" or "n").
+    activations,          # str: Activation function list for the neural network.
+    visualize,            # visualize Testing procces or not visualize ('y' or 'n')
    W                     # list[list[num]]: Weight matrix of the neural network.
) -> tuple:
    infoTestModel = """
    Tests the neural network model with the given test data.

    Args:
+        x_test (list[list[num]]): Test input data.
+        y_test (list[num]): Test labels.
+        layers (list[str]): List of layer names.
+        membran_thresholds (list[str]): List of MEMBRAN THRESHOLDS for each layer.
+        membran_potentials (list[num]): List of MEMBRAN POTENTIALS for each layer.
+        normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
+        activations (list[str]): Activation function for the neural network.
        W (list[list[num]]): Weight matrix of the neural network.

    Returns:
@@ -589,49 +589,49 @@ def TestPLAN(
    try:
        Wc = [0] * len(W)
        true = 0
+        TestPredictions = [None] * len(y_test)
        for i, w in enumerate(W):
            Wc[i] = np.copy(w)
            print('\rCopying weights.....',i+1,'/',len(W),end = "")

        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        for inpIndex,Input in enumerate(x_test):
            Input = np.array(Input)
            Input = Input.ravel()
+            uni_start_time = time.time()
+            neural_layer = Input

+            for index, Layer in enumerate(layers):
+                if normalizations[index] == 'y':
+                    neural_layer = Normalization(neural_layer)
+                if activations[index] == 'relu':
+                    neural_layer = Relu(neural_layer)
+                elif activations[index] == 'sigmoid':
+                    neural_layer = Sigmoid(neural_layer)
+                elif activations[index] == 'softmax':
+                    neural_layer = Softmax(neural_layer)

+                if layers[index] == 'fex':
+                    neural_layer,useless = Fex(neural_layer, W[index], membran_thresholds[index], membran_potentials[index])
+                if layers[index] == 'cat':
+                    neural_layer,useless = Cat(neural_layer, W[index], membran_thresholds[index], membran_potentials[index],0)
            for i, w in enumerate(Wc):
                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(neural_layer)
            if RealOutput == PredictedOutput:
                true += 1
+            acc = true / len(y_test)
            TestPredictions[inpIndex] = PredictedOutput

+            if visualize == 'y':

+                y_testVisual = np.copy(y_test)
+                y_testVisual = np.argmax(y_testVisual, axis=1)

                plt.figure(figsize=(12, 6))
+                sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
                sns.kdeplot(TestPredictions, label='Predictions', fill=True)
                plt.legend()
                plt.xlabel('Class')
@@ -639,72 +639,72 @@ def TestPLAN(
                plt.title('Predictions and Real Outputs for Testing KDE Plot')
                plt.show()

+                if inpIndex + 1 != len(x_test):

                    plt.close('all')

+            uni_end_time = time.time()

+            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)

+            if calculating_est < 60:
+                print('\rest......(sec):',calculating_est,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")

+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):',calculating_est/60,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")

+            elif calculating_est > 3600:
+                print('\rest......(h):',calculating_est/3600,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")

        EndTime = time.time()
        for i, w in enumerate(Wc):
            W[i] = np.copy(w)

+        calculating_est = round(EndTime - start_time,2)

        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")

+        if calculating_est < 60:
+            print('Total testing time(sec): ',calculating_est)

+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ',calculating_est/60)

+        elif calculating_est > 3600:
+            print('Total testing time(h): ',calculating_est/3600)

+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)

+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)

+        elif acc <= 0.6:
+            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)

    except:

+        print(Fore.RED + "ERROR: Testing model parameters like 'layers' 'MembranCounts' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
        return 'e'

+    return W,TestPredictions,acc
+
+def SavePLAN(model_name,
+             model_type,
+             layers,
+             class_count,
+             membran_thresholds,
+             membran_potentials,
+             normalizations,
+             activations,
+             test_acc,
+             log_type,
+             weights_type,
+             weights_format,
+             model_path,
             W
):

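Before the save/load hunks, a minimal sketch of evaluating a trained model through the renamed TestPLAN signature. It continues the hypothetical TrainPLAN call sketched earlier and assumes invented x_test/y_test; the configuration lists must match training.

    W, TestPredictions, test_acc = plan.TestPLAN(
        x_test, y_test,
        ['fex', 'cat'],        # layers
        ['<', '=='],           # membran_thresholds
        [0.5, 0],              # membran_potentials
        ['y', 'n'],            # normalizations
        ['none', 'none'],      # activations
        'n',                   # visualize
        W)                     # weights returned by TrainPLAN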
@@ -712,19 +712,19 @@ def SavePLAN(ModelName,
    Function to save a deep learning model.

    Arguments:
+    model_name (str): Name of the model.
+    model_type (str): Type of the model.(options: PLAN)
+    layers (list): List containing 'fex' and 'cat' layers.
+    class_count (int): Number of classes.
+    membran_thresholds (list): List containing MEMBRAN THRESHOLDS.
+    membran_potentials (list): List containing MEMBRAN POTENTIALS.
    DoNormalization (str): is that normalized data ? 'y' or 'n'.
+    activations (list): List containing activation functions for each layer.
+    test_acc (float): Test accuracy of the model.
+    log_type (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
+    weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
    WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
+    model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
    W: Weights of the model.

    Returns:
@@ -734,15 +734,15 @@ def SavePLAN(ModelName,
    # Operations to be performed by the function will be written here
    pass

+    if log_type != 'csv' and log_type != 'txt' and log_type != 'hdf5':
        print(Fore.RED + "ERROR109: Save Log Type (File Extension) must be 'csv' or 'txt' or 'hdf5' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'

+    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'

+    if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'

@@ -760,100 +760,100 @@ def SavePLAN(ModelName,
    from datetime import datetime
    from scipy import io

+    data = {'MODEL NAME': model_name,
+            'MODEL TYPE': model_type,
+            'LAYERS': layers,
+            'LAYER COUNT': len(layers),
+            'CLASS COUNT': class_count,
+            'MEMBRAN THRESHOLDS': membran_thresholds,
+            'MEMBRAN POTENTIALS': membran_potentials,
+            'NORMALIZATION': normalizations,
+            'ACTIVATIONS': activations,
            'NEURON COUNT': NeuronCount,
            'SYNAPSE COUNT': SynapseCount,
+            'TEST ACCURACY': test_acc,
            'SAVE DATE': datetime.now(),
+            'WEIGHTS TYPE': weights_type,
+            'WEIGHTS FORMAT': weights_format,
+            'MODEL PATH': model_path
            }
    try:

        df = pd.DataFrame(data)

+        if log_type == 'csv':

+            df.to_csv(model_path + model_name + '.csv', sep='\t', index=False)

+        elif log_type == 'txt':

+            df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)

+        elif log_type == 'hdf5':

+            df.to_hdf(model_path + model_name + '.h5', key='data', mode='w')

    except:

+        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'
    try:

+        if weights_type == 'txt' and weights_format == 'd':

            for i, w in enumerate(W):
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%d')

+        if weights_type == 'txt' and weights_format == 'f':

            for i, w in enumerate(W):
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%f')

+        if weights_type == 'txt' and weights_format == 'raw':

            for i, w in enumerate(W):
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w)


        ###


+        if weights_type == 'npy' and weights_format == 'd':

            for i, w in enumerate(W):
+                np.save(model_path + model_name + str(i+1) + 'w.npy', w.astype(int))

+        if weights_type == 'npy' and weights_format == 'f':

            for i, w in enumerate(W):
+                np.save(model_path + model_name + str(i+1) + 'w.npy' , w, w.astype(float))

+        if weights_type == 'npy' and weights_format == 'raw':

            for i, w in enumerate(W):
+                np.save(model_path + model_name + str(i+1) + 'w.npy' , w)


        ###


+        if weights_type == 'mat' and weights_format == 'd':

            for i, w in enumerate(W):
                w = {'w': w.astype(int)}
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)

+        if weights_type == 'mat' and weights_format == 'f':

            for i, w in enumerate(W):
                w = {'w': w.astype(float)}
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)

+        if weights_type == 'mat' and weights_format == 'raw':

            for i, w in enumerate(W):
                w = {'w': w}
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)

    except:

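A sketch of a save call matching the renamed SavePLAN parameters; the model name, path, and format choices below are invented for illustration only.

    plan.SavePLAN(model_name='demo_plan',
                  model_type='PLAN',
                  layers=['fex', 'cat'],
                  class_count=2,
                  membran_thresholds=['<', '=='],
                  membran_potentials=[0.5, 0],
                  normalizations=['y', 'n'],
                  activations=['none', 'none'],
                  test_acc=test_acc,
                  log_type='csv',
                  weights_type='npy',
                  weights_format='raw',
                  model_path='C:/Users/beydili/Desktop/denemePLAN/',
                  W=W)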
@@ -869,20 +869,20 @@ def SavePLAN(ModelName,
    return print(message)


+def LoadPLAN(model_name,
+             model_path,
+             log_type,
):
    infoLoadPLAN = """
    Function to load a deep learning model.

    Arguments:
+    model_name (str): Name of the model.
+    model_path (str): Path where the model is saved.
+    log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').

    Returns:
+    lists: W(list[num]), layers, membran_thresholds, membran_potentials, Normalization,activations
    """
    pass

@@ -892,112 +892,112 @@ def LoadPLAN(ModelName,

    try:

+        if log_type == 'csv':
+            df = pd.read_csv(model_path + model_name + '.' + log_type)


+        if log_type == 'txt':
+            df = pd.read_csv(model_path + model_name + '.' + log_type, delimiter='\t')


+        if log_type == 'hdf5':
+            df = pd.read_hdf(model_path + model_name + '.' + log_type)
    except:
+        print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)

+    model_name = str(df['MODEL NAME'].iloc[0])
+    layers = df['LAYERS'].tolist()
+    layer_count = int(df['LAYER COUNT'].iloc[0])
+    class_count = int(df['CLASS COUNT'].iloc[0])
+    membran_thresholds = df['MEMBRAN THRESHOLDS'].tolist()
+    membran_potentials = df['MEMBRAN POTENTIALS'].tolist()
+    normalizations = df['NORMALIZATION'].tolist()
+    activations = df['ACTIVATIONS'].tolist()
    NeuronCount = int(df['NEURON COUNT'].iloc[0])
    SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
+    test_acc = int(df['TEST ACCURACY'].iloc[0])
+    model_type = str(df['MODEL TYPE'].iloc[0])
    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
    WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
+    model_path = str(df['MODEL PATH'].iloc[0])

+    W = [0] * layer_count

    if WeightType == 'txt':
+        for i in range(layer_count):
+            W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
    elif WeightType == 'npy':
+        for i in range(layer_count):
+            W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
    elif WeightType == 'mat':
+        for i in range(layer_count):
+            W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
    else:
        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
    print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
+    return W,layers,membran_thresholds,membran_potentials,normalizations,activations,df

+def PredictFromDiscPLAN(Input,model_name,model_path,log_type):
    infoPredictFromDİscPLAN = """
    Function to make a prediction using a divided pruning deep learning neural network (PLAN).

    Arguments:
    Input (list or ndarray): Input data for the model (single vector or single matrix).
+    model_name (str): Name of the model.
+    model_path (str): Path where the model is saved.
+    log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').

    Returns:
    ndarray: Output from the model.
    """
+    W,layers,membran_thresholds,membran_potentials,normalizations,activations = LoadPLAN(model_name,model_path,
+                                                                                         log_type)[0:6]
    Wc = [0] * len(W)
    for i, w in enumerate(W):
        Wc[i] = np.copy(w)
    try:
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
            if Normalization == 'y':
+                neural_layer = Normalization(neural_layer)
+            if activations[index] == 'relu':
+                neural_layer = Relu(neural_layer)
+            elif activations[index] == 'sigmoid':
+                neural_layer = Sigmoid(neural_layer)
+            elif activations[index] == 'softmax':
+                neural_layer = Softmax(neural_layer)

+            if layers[index] == 'fex':
+                neural_layer,useless = Fex(neural_layer, W[index],
+                                           membran_thresholds[index],
+                                           membran_potentials[index])
+            if layers[index] == 'cat':
+                neural_layer,useless = Cat(neural_layer, W[index],
+                                           membran_thresholds[index],
+                                           membran_potentials[index],
                                           0)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: PredictFromDiscPLAN" + infoPredictFromDİscPLAN + Style.RESET_ALL)
        return 'e'
    for i, w in enumerate(Wc):
        W[i] = np.copy(w)
+    return neural_layer


+def PredictFromRamPLAN(Input,layers,membran_thresholds,membran_potentials,normalizations,activations,W):
    infoPredictFromRamPLAN = """
    Function to make a prediction using a pruning learning artificial neural network (PLAN)
    from weights and parameters stored in memory.

    Arguments:
    Input (list or ndarray): Input data for the model (single vector or single matrix).
+    layers (list): Number and types of layers.
+    membran_thresholds (list): MEMBRAN THRESHOLDS.
+    membran_potentials (list): MEMBRAN POTENTIALS.
    DoNormalization (str): Whether to normalize ('y' or 'n').
+    activations (list): Activation functions for each layer.
    W (list of ndarrays): Weights of the model.

    Returns:
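A hedged round-trip sketch for the renamed load/predict entry points, reusing the invented name and path from the SavePLAN sketch; whether a given log_type round-trips list-valued columns cleanly is not something this diff shows.

    W, layers, membran_thresholds, membran_potentials, normalizations, activations, df = \
        plan.LoadPLAN('demo_plan', 'C:/Users/beydili/Desktop/denemePLAN/', 'csv')

    output = plan.PredictFromDiscPLAN([0.1, 0.9, 0.2, 0.8],
                                      'demo_plan',
                                      'C:/Users/beydili/Desktop/denemePLAN/',
                                      'csv')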
@@ -1008,75 +1008,119 @@ def PredictFromRamPLAN(Input,Layers,MembranThresholds,MembranPotentials,Normaliz
    for i, w in enumerate(W):
        Wc[i] = np.copy(w)
    try:
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
+            if normalizations[index] == 'y':
+                neural_layer = Normalization(neural_layer)
+            if activations[index] == 'relu':
+                neural_layer = Relu(neural_layer)
+            elif activations[index] == 'sigmoid':
+                neural_layer = Sigmoid(neural_layer)
+            elif activations[index] == 'softmax':
+                neural_layer = Softmax(neural_layer)

+            if layers[index] == 'fex':
+                neural_layer,useless = Fex(neural_layer, W[index],
+                                           membran_thresholds[index],
+                                           membran_potentials[index])
+            if layers[index] == 'cat':
+                neural_layer,useless = Cat(neural_layer, W[index],
+                                           membran_thresholds[index],
+                                           membran_potentials[index],0)
    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: PredictFromRamPLAN." + infoPredictFromRamPLAN + Style.RESET_ALL)
        return 'e'
    for i, w in enumerate(Wc):
        W[i] = np.copy(w)
+    return neural_layer


+def AutoBalancer(x_train, y_train, class_count):
    infoAutoBalancer = """
    Function to balance the training data across different classes.

    Arguments:
+    x_train (list): Input data for training.
+    y_train (list): Labels corresponding to the input data.
+    class_count (int): Number of classes.

    Returns:
    tuple: A tuple containing balanced input data and labels.
    """
    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[0] for i in range(class_count)}
+        class_counts = [len(ClassIndices[i]) for i in range(class_count)]

+        if len(set(class_counts)) == 1:
            print(Fore.WHITE + "INFO: All training data have already balanced. from: AutoBalancer" + Style.RESET_ALL)
+            return x_train, y_train

+        MinCount = min(class_counts)

        BalancedIndices = []
+        for i in range(class_count):
            if len(ClassIndices[i]) > MinCount:
                SelectedIndices = np.random.choice(ClassIndices[i], MinCount, replace=False)
            else:
                SelectedIndices = ClassIndices[i]
            BalancedIndices.extend(SelectedIndices)

+        BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+        BalancedLabels = [y_train[idx] for idx in BalancedIndices]

+        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: AutoBalancer " + Style.RESET_ALL)
    except:
        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoAutoBalancer)
        return 'e'

    return BalancedInputs, BalancedLabels

+def SyntheticAugmentation(x, y, class_count):
+    """
+    Generates synthetic examples to balance classes with fewer examples.
+
+    Arguments:
+    x -- Input dataset (examples) - list format
+    y -- Class labels (one-hot encoded) - list format
+    class_count -- Number of classes
+
+    Returns:
+    x_balanced -- Balanced input dataset (list format)
+    y_balanced -- Balanced class labels (one-hot encoded, list format)
+    """
+    # Calculate class distribution
+    class_distribution = {i: 0 for i in range(class_count)}
+    for label in y:
+        class_distribution[np.argmax(label)] += 1
+
+    max_class_count = max(class_distribution.values())
+
+    x_balanced = list(x)
+    y_balanced = list(y)
+
+    for class_label in range(class_count):
+        class_indices = [i for i, label in enumerate(y) if np.argmax(label) == class_label]
+        num_samples = len(class_indices)
+
+        if num_samples < max_class_count:
+            while num_samples < max_class_count:
+                # Select two random examples
+                random_indices = np.random.choice(class_indices, 2, replace=False)
+                sample1 = x[random_indices[0]]
+                sample2 = x[random_indices[1]]
+
+                # Generate a new synthetic example between the two selected examples
+                synthetic_sample = sample1 + (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
+                x_balanced.append(synthetic_sample.tolist())
+                y_balanced.append(y[class_indices[0]])  # The new example has the same class label
+
+                num_samples += 1
+
+    return np.array(x_balanced), np.array(y_balanced)
+

def GetWeights():

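The headline addition in this release is SyntheticAugmentation, which oversamples minority classes by interpolating between two randomly chosen same-class examples (AutoBalancer instead undersamples down to the smallest class). A sketch with invented data; note each minority class needs at least two samples for the pairwise np.random.choice to succeed:

    import numpy as np
    from plan import plan

    x = [[0.0, 0.0], [0.2, 0.1],                          # class 0: 2 samples
         [1.0, 1.0], [0.9, 1.1], [1.1, 0.9], [1.0, 0.8]]  # class 1: 4 samples
    y = [[1, 0], [1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]

    x_bal, y_bal = plan.SyntheticAugmentation(x, y, class_count=2)
    print(len(x_bal), len(y_bal))  # -> 8 8: class 0 grew from 2 to 4 samples
    # plan.AutoBalancer(x, y, 2) would instead shrink class 1 down to 2 samples.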
@@ -1090,6 +1134,6 @@ def GetPreds()

    return 1

+def Getacc():

    return 2