pyerualjetwork 1.3.8__py3-none-any.whl → 2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/__init__.py +2 -1
- plan/plan.py +211 -420
- pyerualjetwork-2.0.0.dist-info/METADATA +8 -0
- pyerualjetwork-2.0.0.dist-info/RECORD +6 -0
- pyerualjetwork-1.3.8.dist-info/METADATA +0 -8
- pyerualjetwork-1.3.8.dist-info/RECORD +0 -6
- {pyerualjetwork-1.3.8.dist-info → pyerualjetwork-2.0.0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-1.3.8.dist-info → pyerualjetwork-2.0.0.dist-info}/top_level.txt +0 -0
plan/__init__.py
CHANGED
@@ -2,4 +2,5 @@
 
 # Bu dosya, plan modülünün ana giriş noktasıdır.
 
-from .plan import
+from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation
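Every public name is now snake_case, so 1.3.8 call sites break at import time. A minimal sketch of importing the 2.0 surface (only the names themselves are confirmed by the hunk above; the consumer code is hypothetical):

```python
# Hypothetical consumer code; these names are re-exported via plan/__init__.py.
from plan import fit, evaluate, save_model, load_model, predict_model_ram
```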
plan/plan.py
CHANGED
@@ -13,17 +13,10 @@ import matplotlib.pyplot as plt
 import seaborn as sns
 
 # BUILD -----
-def TrainPLAN(
-    x_train: List[Union[int, float]],
+def fit(
+    x_train: List[Union[int, float]],
     y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
-
-    layers: List[str],
-    neurons: List[Union[int, float]],
-    membran_thresholds: List[str],
-    membran_potentials: List[Union[int, float]],
-    normalizations: List[str],
-    activations: List[str],
-    visualize: str
+    action_potential: List[Union[int, float]],
 ) -> str:
 
     infoPLAN = """
@@ -32,128 +25,40 @@ def TrainPLAN(
     Args:
         x_train (list[num]): List of input data.
         y_train (list[num]): List of y_train. (one hot encoded)
-
-        layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
-        neurons (list[num]): List of neuron counts for each layer.
-        membran_thresholds (list[str]): List of membran_thresholds.
-        membran_potentials (list[num]): List of membran_potentials.
-        normalizations (List[str]): Whether normalization will be performed at indexed layers ("y" or "n").
-        activations (list[str]): List of activation functions.
-        visualize (str): visualize Training procces or not visualize ('y' or 'n')
+        action_potential (float): Input ACTION potential
 
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Trainacc).
        error handled ?: Process status ('e')
    """
+    if action_potential < 0 or action_potential > 1:
 
-
-        print(Fore.RED + "ERROR109: visualize parameter must be 'y' or 'n'. TrainPLAN",infoPLAN)
+        print(Fore.RED + "ERROR101: ACTION potential value must be in range 0-1. from: fit",infoPLAN)
        return 'e'
 
-
-    LastNeuron = neurons[-1:][0]
-    if LastNeuron != class_count:
-        print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(normalizations) != len(membran_potentials):
-
-        print(Fore.RED + "ERROR307: Normalization list length must be equal to length of membran_thresholds List,membran_potentials List,layers List,neurons List. from: TrainPLAN",infoPLAN)
-        return 'e'
-
    if len(x_train) != len(y_train):
-
-
-        if normalizations[i] != 'y' and normalizations[i] != 'n':
-            print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
-            return 'e'
-
-        if membran_thresholds[i] == 'none':
-            print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
-            time.sleep(3)
-
-        if isinstance(Value, str):
-            print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
-            return 'e'
-
-        if isinstance(neurons[i], str):
-            print(Fore.RED + "ERROR202: neurons list must be numeric.")
-            return 'e'
-
-    if len(membran_thresholds) != len(membran_potentials):
-        print(Fore.RED + "ERROR302: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(layers) != len(neurons):
-        print(Fore.RED + "ERROR303: layers list and neurons list must same length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(membran_potentials) != len(layers) or len(membran_thresholds) != len(layers):
-        print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same layers list length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    for Activation in activations:
-        if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
-            print(Fore.RED + "ERROR108: activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
-            return 'e'
-
-    for index, Neuron in enumerate(neurons):
-        if Neuron < 1:
-            print(Fore.RED + "ERROR101: neurons list must be positive non zero integer. from: TrainPLAN",infoPLAN)
-            return 'e'
-
-        if index + 1 != len(neurons) and Neuron % 2 != 0:
-            print(Fore.MAGENTA + "WARNING101: We strongly advise to do Neuron counts be should even numbers. from: TrainPLAN",infoPLAN)
-            time.sleep(3)
-
-        if Neuron < class_count:
-            print(Fore.RED + "ERROR102: Neuron count must be greater than class count(For PLAN). from: TrainPLAN")
-            return 'e'
-
-        if layers[index] != 'fex' and layers[index] != 'cat':
-            print(Fore.RED + "ERROR107: layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
-            return 'e'
-
-    if len(membran_thresholds) != len(membran_potentials):
-        print(Fore.RED + "ERROR305: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    for i, Sign in enumerate(membran_thresholds):
-        if Sign != '>' and Sign != '<' and Sign != '==' and Sign != '!=' and Sign != 'none':
-            print(Fore.RED + "ERROR104: MEMBRAN THRESHOLDS must be '>' or '<' or '==' or '!='. or 'none' WE SUGGEST '<' FOR FEX LAYER AND '==' FOR CAT LAYER (Your data, your hyperparameter) from: TrainPLAN",infoPLAN)
-            return 'e'
-
-        if layers[i] == 'fex' and Sign == 'none':
-            print(Fore.RED + "ERROR109: at layer type 'fex', pairing with 'none' Threshold is not acceptlable. if you want to 'none' put '==' and make threshold value '0'. from: TrainPLAN ",infoPLAN)
-            return 'e'
-
-    Uniquey_train = set()
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit",infoPLAN)
+        return 'e'
+
+    class_count = set()
    for sublist in y_train:
 
-
+        class_count.add(tuple(sublist))
 
-
+    class_count = list(class_count)
 
    y_train = [tuple(sublist) for sublist in y_train]
 
-
-        print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
-        return 'e'
+    neurons = [len(class_count),len(class_count)]
+    layers = ['fex', 'cat']
 
    x_train[0] = np.array(x_train[0])
    x_train[0] = x_train[0].ravel()
    x_train_size = len(x_train[0])
 
-    W =
-    Divides, Piece =
+    W = weight_identification(len(layers) - 1,len(class_count),neurons,x_train_size)
+    Divides, Piece = synaptic_dividing(len(class_count),W)
    trained_W = [1] * len(W)
    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
    train_predictions = [None] * len(y_train)
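The effect of this hunk is that training shrinks from nine hyperparameters to three: layer names, neuron counts and class handling are now derived from the one-hot labels, and only the action potential remains configurable. A hedged usage sketch (the toy data and names are illustrative, not from the package):

```python
import numpy as np
from plan import fit

# Four toy samples, two classes, labels one-hot encoded as fit expects.
x_train = [np.array([0.1, 0.9]), np.array([0.8, 0.2]),
           np.array([0.2, 0.7]), np.array([0.9, 0.1])]
y_train = [[0, 1], [1, 0], [0, 1], [1, 0]]

# action_potential must lie in [0, 1]; out of range returns 'e' (ERROR101).
result = fit(x_train, y_train, action_potential=0.5)
```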
@@ -165,11 +70,11 @@ def TrainPLAN(
             inp = inp.ravel()
 
             if x_train_size != len(inp):
-                print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from:
+                print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",infoPLAN + Style.RESET_ALL)
                 return 'e'
 
 
-            for Ulindex, Ul in enumerate(
+            for Ulindex, Ul in enumerate(class_count):
 
                 if Ul == y_train[index]:
                     for Windex, w in enumerate(W):
@@ -180,26 +85,19 @@ def TrainPLAN(
                         cs = Divides[int(k)][Windex][0]
 
 
-                        W[Windex] =
+                        W[Windex] = synaptic_pruning(w, cs, 'row', int(k),len(class_count),Piece[Windex],1)
 
             neural_layer = inp
 
             for Lindex, Layer in enumerate(layers):
 
-
-                if activations[Lindex] == 'relu':
-                    neural_layer = Relu(neural_layer)
-                elif activations[Lindex] == 'sigmoid':
-                    neural_layer = Sigmoid(neural_layer)
-                elif activations[Lindex] == 'softmax':
-                    neural_layer = Softmax(neural_layer)
+
+                neural_layer = normalization(neural_layer)
 
                 if Layer == 'fex':
-                    neural_layer,W[Lindex] =
+                    neural_layer,W[Lindex] = fex(neural_layer, W[Lindex], action_potential, Piece[Windex], 1)
                 elif Layer == 'cat':
-                    neural_layer,W[Lindex] =
+                    neural_layer,W[Lindex] = cat(neural_layer, W[Lindex], action_potential, 1, Piece[Windex])
 
             RealOutput = np.argmax(y_train[index])
             PredictedOutput = np.argmax(neural_layer)
@@ -208,34 +106,11 @@ def TrainPLAN(
             acc = true / len(y_train)
             train_predictions[index] = PredictedOutput
 
-
-                plt.figure(figsize=(12, 6))
-                sns.kdeplot(y_trainVisual, label='Real Outputs', fill=True)
-                sns.kdeplot(train_predictions, label='Predictions', fill=True)
-                plt.legend()
-                plt.xlabel('Class')
-                plt.ylabel('Data size')
-                plt.title('Predictions and Real Outputs for Training KDE Plot')
-                plt.show()
-
-                if index + 1 != len(x_train):
-
-                    plt.close('all')
-
-            if index == 0:
-                for i, w in enumerate(W):
-                    trained_W[i] = w
-
-            else:
-                for i, w in enumerate(W):
-                    trained_W[i] = trained_W[i] + w
-
-
-            W = WeightIdentification(len(layers) - 1,class_count,neurons,x_train_size)
+            for i, w in enumerate(W):
+                trained_W[i] = trained_W[i] + w
+
+
+            W = weight_identification(len(layers) - 1, len(class_count), neurons, x_train_size)
 
 
             uni_end_time = time.time()
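The old visualize branch and the index == 0 special case are gone: every sample's per-sample weights are simply summed into trained_W, and W is re-initialized before the next sample. The accumulation pattern in isolation (a simplified sketch with made-up matrices, not the package's code):

```python
import numpy as np

def accumulate_sketch(per_sample_weights):
    # trained_W[i] = trained_W[i] + w after every sample, as in the new fit loop.
    trained = [np.zeros_like(w) for w in per_sample_weights[0]]
    for W in per_sample_weights:
        for i, w in enumerate(W):
            trained[i] = trained[i] + w
    return trained

# Two "samples", each producing two 2x2 weight matrices of ones.
per_sample = [[np.ones((2, 2)), np.ones((2, 2))] for _ in range(2)]
print(accumulate_sketch(per_sample)[0])   # -> 2.0 everywhere
```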
@@ -285,7 +160,7 @@ def TrainPLAN(
 
 # FUNCTIONS -----
 
-def WeightIdentification(
+def weight_identification(
     layer_count, # int: Number of layers in the neural network.
     class_count, # int: Number of classes in the classification task.
     neurons, # list[num]: List of neuron counts for each layer.
@@ -314,7 +189,7 @@ def WeightIdentification(
     W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
     return W
 
-def SynapticPruning(
+def synaptic_pruning(
     w, # list[list[num]]: Weight matrix of the neural network.
     cs, # list[list[num]]: Synaptic connections between neurons.
     key, # int: key for identifying synaptic connections.
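weight_identification keeps its body; only the name changed. The one line shown as context pins the output-layer shape to (class_count, neurons[layer_count - 1]) with all-ones init. A sketch that reproduces the shapes for the fixed two-layer 2.0 network (the earlier-layer inits are assumptions; only the last-layer line is confirmed by the diff):

```python
import numpy as np

def weight_identification_sketch(layer_count, class_count, neurons, x_train_size):
    W = [None] * (layer_count + 1)
    W[0] = np.ones((neurons[0], x_train_size))        # assumed first-layer init
    for i in range(1, layer_count):
        W[i] = np.ones((neurons[i], neurons[i - 1]))  # assumed hidden-layer init
    W[layer_count] = np.ones((class_count, neurons[layer_count - 1]))  # confirmed
    return W

# fit() calls it with layer_count=1 and neurons=[c, c] for c classes:
shapes = [w.shape for w in weight_identification_sketch(1, 3, [3, 3], 784)]
print(shapes)   # [(3, 784), (3, 3)]
```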
@@ -375,7 +250,7 @@ def SynapticPruning(
                 w[:,cs] = 0
 
             else:
-                print(Fore.RED + "ERROR103:
+                print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning)
                 return 'e'
     else:
         if key == 'row':
@@ -390,11 +265,11 @@ def SynapticPruning(
             w[:,cs] = 0
 
         else:
-            print(Fore.RED + "ERROR103:
+            print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning + Style.RESET_ALL)
             return 'e'
     return w
 
-def SynapticDividing(
+def synaptic_dividing(
     class_count, # int: Total number of classes in the dataset.
     W # list[list[num]]: Weight matrix of the neural network.
 ) -> str:
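Both branches of synaptic_pruning end in the same operation: zeroing the selected rows or columns of the weight matrix. The 'col' case, w[:,cs] = 0, appears as context above; the 'row' case is assumed symmetric. In isolation:

```python
import numpy as np

def prune_sketch(w, cs, key):
    if key == 'row':
        w[cs, :] = 0          # assumed: cut whole output neurons
    elif key == 'col':
        w[:, cs] = 0          # confirmed by the diff: cut the selected input synapses
    else:
        raise ValueError("key must be 'row' or 'col'")  # ERROR103 in the real code
    return w

w = np.ones((3, 4))
print(prune_sketch(w, np.array([1, 3]), 'col'))  # columns 1 and 3 zeroed
```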
@@ -439,11 +314,10 @@ def SynapticDividing(
     return Divides, Piece
 
 
-def Fex(
+def fex(
     Input, # list[num]: Input data.
-    w, # list[list[num]]: Weight matrix of the neural network
-
-    membran_potential, # num: Threshold value for comparison.
+    w, # list[list[num]]: Weight matrix of the neural network.,
+    action_potential, # num: Threshold value for comparison.
     piece, # ???
     is_training # num: 1 or 0
 ) -> tuple:
@@ -453,32 +327,25 @@ def Fex(
     Args:
         Input (list[num]): Input data.
         w (list[list[num]]): Weight matrix of the neural network.
-
-
+        ACTION_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+        action_potential (num): Threshold value for comparison.
 
     Returns:
         tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
 
-    if membran_threshold == '<':
-        PruneIndex = np.where(Input < membran_potential)
-    elif membran_threshold == '>':
-        PruneIndex = np.where(Input > membran_potential)
-    elif membran_threshold == '==':
-        PruneIndex = np.where(Input == membran_potential)
-    elif membran_threshold == '!=':
-        PruneIndex = np.where(Input != membran_potential)
 
-
+    PruneIndex = np.where(Input < action_potential)
+    w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
 
     neural_layer = np.dot(w, Input)
+
     return neural_layer,w
 
-def Cat(
+def cat(
     Input, # list[num]: Input data.
     w, # list[list[num]]: Weight matrix of the neural network.
-
-    membran_potential, # num: Threshold value for comparison.
+    action_potential, # num: Threshold value for comparison.
     isTrain,
     piece # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
 ) -> tuple:
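fex loses its comparison-sign branching entirely: the 2.0 body always prunes the input columns that fall below action_potential and then applies the weights. The mechanic on concrete numbers (shapes and values are illustrative):

```python
import numpy as np

action_potential = 0.5
Input = np.array([0.2, 0.9, 0.4])
w = np.ones((2, 3))

# Indices below the action potential are pruned out of every row...
PruneIndex = np.where(Input < action_potential)[0]   # -> array([0, 2])
w[:, PruneIndex] = 0    # what synaptic_pruning(w, PruneIndex, 'col', ...) amounts to

# ...and the layer output is a plain matrix-vector product.
neural_layer = np.dot(w, Input)
print(neural_layer)     # [0.9 0.9] - only the surviving synapse contributes
```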
@@ -488,32 +355,27 @@ def Cat(
     Args:
         Input (list[num]): Input data.
         w (list[list[num]]): Weight matrix of the neural network.
-
-
+        ACTION_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+        action_potential (num): Threshold value for comparison.
         isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
 
     Returns:
         tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
     """
-
-
-        PruneIndex = np.where(Input > membran_potential)
-    elif membran_threshold == '==':
-        PruneIndex = np.where(Input == membran_potential)
-    elif membran_threshold == '!=':
-        PruneIndex = np.where(Input != membran_potential)
-    if isTrain == 1 and membran_threshold != 'none':
+
+    PruneIndex = np.where(Input == action_potential)
+
+    if isTrain == 1:
 
-
+        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, isTrain)
 
 
     neural_layer = np.dot(w, Input)
+
     return neural_layer,w
 
 
-def Normalization(
+def normalization(
     Input # list[num]: Input data to be normalized.
 ):
     """
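cat is the same shape as fex with two differences visible above: the comparison is equality rather than less-than, and pruning runs only while training (isTrain == 1), so inference leaves the weights untouched. A compact sketch of that behavior (simplified, not the package's code):

```python
import numpy as np

def cat_sketch(Input, w, action_potential, isTrain):
    PruneIndex = np.where(Input == action_potential)[0]
    if isTrain == 1:
        w[:, PruneIndex] = 0      # pruning happens only during training
    return np.dot(w, Input), w    # at test time w passes through unchanged
```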
@@ -586,187 +448,155 @@ def Relu(
 
 
 
-def
+def evaluate(
     x_test, # list[list[num]]: Test input data.
     y_test, # list[num]: Test labels.
-
-    membran_thresholds, # list[str]: List of MEMBRAN THRESHOLDS for each layer.
-    membran_potentials, # list[num]: List of MEMBRAN POTENTIALS for each layer.
-    normalizations, # str: Whether normalization will be performed ("y" or "n").
-    activations, # str: Activation function list for the neural network.
+    action_potential, # list[num]: List of ACTION POTENTIALS for each layer.
     visualize, # visualize Testing procces or not visualize ('y' or 'n')
     W # list[list[num]]: Weight matrix of the neural network.
 ) -> tuple:
-
+    infoTestModel = """
     Tests the neural network model with the given test data.
 
     Args:
         x_test (list[list[num]]): Test input data.
         y_test (list[num]): Test labels.
-
-
-        membran_potentials (list[num]): List of MEMBRAN POTENTIALS for each layer.
-        normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
-        activations (list[str]): Activation function for the neural network.
+        action_potential (float): Input ACTION potential
+        visualize (str): Visualize test progress ? ('y' or 'n')
         W (list[list[num]]): Weight matrix of the neural network.
 
     Returns:
         tuple: A tuple containing the predicted labels and the accuracy of the model.
     """
+
+    layers = ['fex','cat']
 
 
-
-    print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
-    start_time = time.time()
-    for inpIndex,Input in enumerate(x_test):
-        Input = np.array(Input)
-        Input = Input.ravel()
-        uni_start_time = time.time()
-        neural_layer = Input
-
-        for index, Layer in enumerate(layers):
-            if normalizations[index] == 'y':
-                neural_layer = Normalization(neural_layer)
-            if activations[index] == 'relu':
-                neural_layer = Relu(neural_layer)
-            elif activations[index] == 'sigmoid':
-                neural_layer = Sigmoid(neural_layer)
-            elif activations[index] == 'softmax':
-                neural_layer = Softmax(neural_layer)
-
-            if layers[index] == 'fex':
-                neural_layer,useless = Fex(neural_layer, W[index], membran_thresholds[index], membran_potentials[index],0,0)
-            if layers[index] == 'cat':
-                neural_layer,useless = Cat(neural_layer, W[index], membran_thresholds[index], membran_potentials[index],0,0)
-        for i, w in enumerate(Wc):
-            W[i] = np.copy(w)
-        RealOutput = np.argmax(y_test[inpIndex])
-        PredictedOutput = np.argmax(neural_layer)
-        if RealOutput == PredictedOutput:
-            true += 1
-        acc = true / len(y_test)
-        TestPredictions[inpIndex] = PredictedOutput
-
-        if visualize == 'y':
-
-            y_testVisual = np.copy(y_test)
-            y_testVisual = np.argmax(y_testVisual, axis=1)
-
-            plt.figure(figsize=(12, 6))
-            sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
-            sns.kdeplot(TestPredictions, label='Predictions', fill=True)
-            plt.legend()
-            plt.xlabel('Class')
-            plt.ylabel('Data size')
-            plt.title('Predictions and Real Outputs for Testing KDE Plot')
-            plt.show()
-
-            if inpIndex + 1 != len(x_test):
-
-                plt.close('all')
-
-        uni_end_time = time.time()
-
-        calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
-
-        if calculating_est < 60:
-            print('\rest......(sec):',calculating_est,'\n',end= "")
-            print('\rTest accuracy: ' ,acc ,"\n", end="")
+    try:
+        Wc = [0] * len(W)
+        true = 0
+        TestPredictions = [None] * len(y_test)
+        for i, w in enumerate(W):
+            Wc[i] = np.copy(w)
+            print('\rCopying weights.....',i+1,'/',len(W),end = "")
 
-
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        for inpIndex,Input in enumerate(x_test):
+            Input = np.array(Input)
+            Input = Input.ravel()
+            uni_start_time = time.time()
+            neural_layer = Input
+
+            for index, Layer in enumerate(layers):
 
-
+                neural_layer = normalization(neural_layer)
+
+                if layers[index] == 'fex':
+                    neural_layer = fex(neural_layer, W[index], action_potential, 0, 0)[0]
+                if layers[index] == 'cat':
+                    neural_layer = cat(neural_layer, W[index], action_potential, 0, 0)[0]
 
-        EndTime = time.time()
             for i, w in enumerate(Wc):
                 W[i] = np.copy(w)
-
-
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(neural_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            TestPredictions[inpIndex] = PredictedOutput
 
-
+            if visualize == 'y':
 
-
+                y_testVisual = np.copy(y_test)
+                y_testVisual = np.argmax(y_testVisual, axis=1)
 
-
+                plt.figure(figsize=(12, 6))
+                sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
+                sns.kdeplot(TestPredictions, label='Predictions', fill=True)
+                plt.legend()
+                plt.xlabel('Class')
+                plt.ylabel('Data size')
+                plt.title('Predictions and Real Outputs for Testing KDE Plot')
+                plt.show()
 
-
-        print('Total testing time(h): ',calculating_est/3600)
+                if inpIndex + 1 != len(x_test):
 
-
-        print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+                    plt.close('all')
 
-
+            uni_end_time = time.time()
+
+            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):',calculating_est,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
 
-            elif
-                print(
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):',calculating_est/60,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):',calculating_est/3600,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
 
+        calculating_est = round(EndTime - start_time,2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ',calculating_est)
 
-
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ',calculating_est/60)
 
-
-        sns.kdeplot(TestPredictions, label='Predictions', fill=True)
-        plt.legend()
-        plt.xlabel('Class')
-        plt.ylabel('Data size')
-        plt.title('Predictions and Real Outputs for Testing KDE Plot')
-        plt.show()
+        elif calculating_est > 3600:
+            print('Total testing time(h): ',calculating_est/3600)
 
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
 
-
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+
+    except:
 
-        print(Fore.RED + "ERROR: Testing model parameters like '
+        print(Fore.RED + "ERROR: Testing model parameters like 'action_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
         return 'e'
 
 
-
+    return W,TestPredictions,acc
 
-def
+def save_model(model_name,
     model_type,
-    layers,
     class_count,
-
-    membran_potentials,
-    normalizations,
-    activations,
+    action_potential,
     test_acc,
-    log_type,
     weights_type,
     weights_format,
     model_path,
     W
 ):
 
-
+    infosave_model = """
     Function to save a deep learning model.
 
     Arguments:
         model_name (str): Name of the model.
         model_type (str): Type of the model.(options: PLAN)
-        layers (list): List containing 'fex' and 'cat' layers.
         class_count (int): Number of classes.
-
-        membran_potentials (list): List containing MEMBRAN POTENTIALS.
-        DoNormalization (str): is that normalized data ? 'y' or 'n'.
-        activations (list): List containing activation functions for each layer.
+        action_potential (list): List containing ACTION POTENTIALS.
         test_acc (float): Test accuracy of the model.
-        log_type (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
         weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
         WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
         model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
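On the caller's side, evaluate now mirrors fit: test data, one action potential, a visualize flag and the weights, returning (W, TestPredictions, acc). A hedged call continuing the earlier fit() sketch (it reuses the training set as test data purely for illustration, and assumes fit returns the trained weights first, as its docstring states):

```python
from plan import fit, evaluate

W, train_predictions, train_acc = fit(x_train, y_train, 0.5)
W, test_predictions, test_acc = evaluate(x_train, y_train, 0.5, 'n', W)
```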
@@ -779,27 +609,26 @@ def SavePLAN(model_name,
     # Operations to be performed by the function will be written here
     pass
 
-
-        return 'e'
-
+    layers = ['fex','cat']
+
     if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
-        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from:
+        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
 
     if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
-        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from:
+        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
 
     NeuronCount = 0
     SynapseCount = 0
+
     try:
         for w in W:
             NeuronCount += np.shape(w)[0]
             SynapseCount += np.shape(w)[0] * np.shape(w)[1]
     except:
 
-        print(Fore.RED + "ERROR: Weight matrices has a problem from:
+        print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
     import pandas as pd
     from datetime import datetime
@@ -810,10 +639,7 @@ def SavePLAN(model_name,
         'LAYERS': layers,
         'LAYER COUNT': len(layers),
         'CLASS COUNT': class_count,
-        '
-        'MEMBRAN POTENTIALS': membran_potentials,
-        'NORMALIZATION': normalizations,
-        'ACTIVATIONS': activations,
+        'ACTION POTENTIAL': action_potential,
         'NEURON COUNT': NeuronCount,
         'SYNAPSE COUNT': SynapseCount,
         'TEST ACCURACY': test_acc,
@@ -825,22 +651,14 @@ def SavePLAN(model_name,
     try:
 
         df = pd.DataFrame(data)
-
-        if log_type == 'csv':
-
-            df.to_csv(model_path + model_name + '.csv', sep='\t', index=False)
-
-        elif log_type == 'txt':
-
-            df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
-
-        elif log_type == 'hdf5':
+
 
-
+        df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
 
+
     except:
 
-        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from:
+        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
     try:
@@ -902,7 +720,7 @@ def SavePLAN(model_name,
 
     except:
 
-        print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from:
+        print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
     print(df)
     message = (
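save_model keeps the txt/npy/mat weight formats but drops log_type: the model log is now always the tab-separated .txt shown in the earlier hunk. A hedged call with the new signature, continuing the evaluate() sketch (name and path are placeholders):

```python
from plan import save_model

save_model(model_name='example_plan', model_type='PLAN', class_count=2,
           action_potential=0.5, test_acc=test_acc, weights_type='npy',
           weights_format='raw', model_path='', W=W)
```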
@@ -914,11 +732,10 @@ def SavePLAN(model_name,
     return print(message)
 
 
-def LoadPLAN(model_name,
+def load_model(model_name,
              model_path,
-             log_type,
 ):
-
+    infoload_model = """
     Function to load a deep learning model.
 
     Arguments:
@@ -927,7 +744,7 @@ def LoadPLAN(model_name,
         log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
 
     Returns:
-        lists: W(list[num]),
+        lists: W(list[num]), action_potential, df (DataFrame of the model)
     """
     pass
@@ -936,28 +753,18 @@ def LoadPLAN(model_name,
     import scipy.io as sio
 
     try:
-
-        df = pd.read_csv(model_path + model_name + '.' + log_type)
-
-
-        if log_type == 'txt':
-            df = pd.read_csv(model_path + model_name + '.' + log_type, delimiter='\t')
-
+
+        df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
 
-        if log_type == 'hdf5':
-            df = pd.read_hdf(model_path + model_name + '.' + log_type)
     except:
-
+
+        print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + infoload_model + Style.RESET_ALL)
 
     model_name = str(df['MODEL NAME'].iloc[0])
     layers = df['LAYERS'].tolist()
     layer_count = int(df['LAYER COUNT'].iloc[0])
     class_count = int(df['CLASS COUNT'].iloc[0])
-
-    membran_potentials = df['MEMBRAN POTENTIALS'].tolist()
-    normalizations = df['NORMALIZATION'].tolist()
-    activations = df['ACTIVATIONS'].tolist()
+    action_potential = int(df['ACTION POTENTIAL'].iloc[0])
     NeuronCount = int(df['NEURON COUNT'].iloc[0])
     SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
     test_acc = int(df['TEST ACCURACY'].iloc[0])
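Loading is symmetric: load_model reads the .txt log back with pandas and pulls the single ACTION POTENTIAL column where 1.3.8 read three per-layer lists. The round trip, hedged (name and path as in the save sketch):

```python
from plan import load_model

# Returns the weight list, the scalar action potential, and the log DataFrame.
W, action_potential, df = load_model('example_plan', '')
```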
@@ -978,25 +785,26 @@ def LoadPLAN(model_name,
     for i in range(layer_count):
         W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
     else:
-        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from:
+        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
     print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
-    return W,
+    return W,action_potential,df
 
-def PredictFromDiscPLAN(Input,model_name,model_path,log_type):
-
+def predict_model_ssd(Input,model_name,model_path):
+
+    infopredict_model_ssd = """
     Function to make a prediction using a divided pruning deep learning neural network (PLAN).
 
     Arguments:
         Input (list or ndarray): Input data for the model (single vector or single matrix).
         model_name (str): Name of the model.
         model_path (str): Path where the model is saved.
-        log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
-
     Returns:
         ndarray: Output from the model.
     """
-    W,
-
+    W,action_potential = load_model(model_name,model_path)[0:2]
+
+    layers = ['fex','cat']
+
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
@@ -1005,50 +813,38 @@ def PredictFromDiscPLAN(Input,model_name,model_path,log_type):
         neural_layer = np.array(neural_layer)
         neural_layer = neural_layer.ravel()
         for index, Layer in enumerate(layers):
-
-            if activations[index] == 'relu':
-                neural_layer = Relu(neural_layer)
-            elif activations[index] == 'sigmoid':
-                neural_layer = Sigmoid(neural_layer)
-            elif activations[index] == 'softmax':
-                neural_layer = Softmax(neural_layer)
+
+            neural_layer = normalization(neural_layer)
 
             if layers[index] == 'fex':
-                neural_layer
-                membran_thresholds[index],
-                membran_potentials[index],0,0)
+                neural_layer = fex(neural_layer, W[index], action_potential,0, 0)[0]
             if layers[index] == 'cat':
-                neural_layer
-                membran_thresholds[index],
-                membran_potentials[index],
-                0,0)
+                neural_layer = cat(neural_layer, W[index], action_potential, 0, 0)[0]
     except:
-        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from:
+        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
         return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
     return neural_layer
 
 
-def PredictFromRamPLAN(
-
+def predict_model_ram(Input,action_potential,W):
+
+    infopredict_model_ram = """
     Function to make a prediction using a pruning learning artificial neural network (PLAN)
     from weights and parameters stored in memory.
 
     Arguments:
         Input (list or ndarray): Input data for the model (single vector or single matrix).
-
-        membran_thresholds (list): MEMBRAN THRESHOLDS.
-        membran_potentials (list): MEMBRAN POTENTIALS.
-        DoNormalization (str): Whether to normalize ('y' or 'n').
-        activations (list): Activation functions for each layer.
+        action_potential (list): ACTION POTENTIAL.
         W (list of ndarrays): Weights of the model.
 
     Returns:
         ndarray: Output from the model.
     """
+    layers = ['fex','cat']
+
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
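The two predictors split along where the model lives: predict_model_ssd fetches the weights and action potential from disk via load_model, while predict_model_ram takes them straight from memory. Hedged calls, continuing the earlier sketches (sample and names illustrative):

```python
import numpy as np
from plan import predict_model_ssd, predict_model_ram

sample = x_train[0]

out = predict_model_ssd(sample, 'example_plan', '')     # reloads from disk
out = predict_model_ram(sample, action_potential, W)    # uses in-memory model
print(np.argmax(out))                                   # predicted class index
```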
@@ -1057,34 +853,25 @@ def PredictFromRamPLAN(Input,layers,membran_thresholds,membran_potentials,normal
         neural_layer = np.array(neural_layer)
         neural_layer = neural_layer.ravel()
         for index, Layer in enumerate(layers):
-
-
-                neural_layer = Relu(neural_layer)
-            elif activations[index] == 'sigmoid':
-                neural_layer = Sigmoid(neural_layer)
-            elif activations[index] == 'softmax':
-                neural_layer = Softmax(neural_layer)
-
+
+            neural_layer = normalization(neural_layer)
+
             if layers[index] == 'fex':
-                neural_layer
-                membran_thresholds[index],
-                membran_potentials[index],0,0)
+                neural_layer = fex(neural_layer, W[index], action_potential,0, 0)[0]
             if layers[index] == 'cat':
-                neural_layer
-
-                membran_potentials[index],
-                0,0)
+                neural_layer = cat(neural_layer, W[index], action_potential, 0, 0)[0]
+
     except:
-        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
         return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
     return neural_layer
 
 
-def AutoBalancer(x_train, y_train, class_count):
-
+def auto_balancer(x_train, y_train, class_count):
+
+    infoauto_balancer = """
     Function to balance the training data across different classes.
 
     Arguments:
@@ -1097,13 +884,13 @@ def AutoBalancer(x_train, y_train, class_count):
     """
     try:
         ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[0] for i in range(class_count)}
-
+        classes = [len(ClassIndices[i]) for i in range(class_count)]
 
-        if len(set(
-            print(Fore.WHITE + "INFO: All training data have already balanced. from:
+        if len(set(classes)) == 1:
+            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
             return x_train, y_train
 
-        MinCount = min(
+        MinCount = min(classes)
 
         BalancedIndices = []
         for i in range(class_count):
@@ -1116,14 +903,14 @@ def AutoBalancer(x_train, y_train, class_count):
         BalancedInputs = [x_train[idx] for idx in BalancedIndices]
         BalancedLabels = [y_train[idx] for idx in BalancedIndices]
 
-        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from:
+        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
     except:
-        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" +
+        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
         return 'e'
 
     return BalancedInputs, BalancedLabels
 
-def SyntheticAugmentation(x, y, class_count):
+def synthetic_augmentation(x, y, class_count):
     """
     Generates synthetic examples to balance classes with fewer examples.
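auto_balancer undersamples every class down to the size of the smallest one (MinCount), while synthetic_augmentation in the next hunk oversamples the small classes instead. Hedged calls with the toy data from earlier:

```python
from plan import auto_balancer, synthetic_augmentation

balanced_x, balanced_y = auto_balancer(x_train, y_train, class_count=2)
augmented_x, augmented_y = synthetic_augmentation(x_train, y_train, class_count=2)
```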
@@ -1168,18 +955,22 @@ def SyntheticAugmentation(x, y, class_count):
     return np.array(x_balanced), np.array(y_balanced)
 
 
-def
+def get_weights():
 
     return 0
 
-def
+def get_df():
 
-    return
+    return 2
 
-def
+def get_preds():
 
     return 1
 
-def
+def get_acc():
 
     return 2
+
+def get_pot():
+
+    return 1
pyerualjetwork-2.0.0.dist-info/METADATA
ADDED
@@ -0,0 +1,8 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.0.0
+Summary: Advanced python deep learning library. New features: More simple and practical, all functions and variables are snake_case. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
+
pyerualjetwork-2.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,6 @@
+plan/__init__.py,sha256=I8tSmV3wEwAdpVm2w0nVD-EW6p1n2DYcqPKx600odCs,381
+plan/plan.py,sha256=eK0-QW-PDDGOIKUd7M4UZDcUBNPBM8yHWcf7_2BuaNQ,33061
+pyerualjetwork-2.0.0.dist-info/METADATA,sha256=XGRSO_yYaLNlTwuzTKyQkdXsdm_-hatGaEVPCkw9UZc,431
+pyerualjetwork-2.0.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.0.0.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+pyerualjetwork-2.0.0.dist-info/RECORD,,
pyerualjetwork-1.3.8.dist-info/METADATA
REMOVED
@@ -1,8 +0,0 @@
-Metadata-Version: 2.1
-Name: pyerualjetwork
-Version: 1.3.8
-Summary: Advanced python deep learning library. MASSIVE Technic Update, unlocked class limits. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
-Author: Hasan Can Beydili
-Author-email: tchasancan@gmail.com
-Keywords: model evaluation,classifcation,pruning learning artficial neural networks
-
pyerualjetwork-1.3.8.dist-info/RECORD
REMOVED
@@ -1,6 +0,0 @@
-plan/__init__.py,sha256=LQbg-AnTUz7KA1E77-mg7X-zRM-7IiK7c3zK-j063rc,375
-plan/plan.py,sha256=y8wVs2cA8G_XdTVCdYsTPmZYsJ2xu68L8uU-bbfkmu4,45348
-pyerualjetwork-1.3.8.dist-info/METADATA,sha256=iNBBKRs72NOtRCe0dDT08Swkg08iwJ0-f1Uyf8gHcro,393
-pyerualjetwork-1.3.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-1.3.8.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
-pyerualjetwork-1.3.8.dist-info/RECORD,,
{pyerualjetwork-1.3.8.dist-info → pyerualjetwork-2.0.0.dist-info}/WHEEL
File without changes
{pyerualjetwork-1.3.8.dist-info → pyerualjetwork-2.0.0.dist-info}/top_level.txt
File without changes