pyerualjetwork 4.3.7.dev1__py3-none-any.whl → 4.3.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +2 -2
- pyerualjetwork/activation_functions_cuda.py +63 -114
- pyerualjetwork/data_operations_cuda.py +1 -1
- pyerualjetwork/model_operations.py +14 -14
- pyerualjetwork/model_operations_cuda.py +16 -17
- pyerualjetwork/parallel.py +118 -0
- pyerualjetwork/plan.py +61 -256
- pyerualjetwork/plan_cuda.py +60 -267
- pyerualjetwork/planeat.py +12 -44
- pyerualjetwork/planeat_cuda.py +9 -45
- pyerualjetwork/visualizations.py +29 -26
- pyerualjetwork/visualizations_cuda.py +20 -22
- {pyerualjetwork-4.3.7.dev1.dist-info → pyerualjetwork-4.3.8.dist-info}/METADATA +2 -19
- pyerualjetwork-4.3.8.dist-info/RECORD +25 -0
- pyerualjetwork-4.3.8.dist-info/top_level.txt +1 -0
- pyerualjetwork-4.3.7.dev1.dist-info/RECORD +0 -44
- pyerualjetwork-4.3.7.dev1.dist-info/top_level.txt +0 -2
- pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork_afterburner/activation_functions.py +0 -290
- pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
- pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
- pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork_afterburner/model_operations.py +0 -408
- pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
- pyerualjetwork_afterburner/plan.py +0 -425
- pyerualjetwork_afterburner/plan_cuda.py +0 -436
- pyerualjetwork_afterburner/planeat.py +0 -793
- pyerualjetwork_afterburner/planeat_cuda.py +0 -797
- pyerualjetwork_afterburner/ui.py +0 -22
- pyerualjetwork_afterburner/visualizations.py +0 -823
- pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
- {pyerualjetwork-4.3.7.dev1.dist-info → pyerualjetwork-4.3.8.dist-info}/WHEEL +0 -0
pyerualjetwork/plan.py
CHANGED
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import numpy as np
-import math
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations import normalization,
+from .data_operations import normalization, batcher
 from .loss_functions import binary_crossentropy, categorical_crossentropy
-from .activation_functions import apply_activation,
+from .activation_functions import apply_activation, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
 from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner,
-    show
+    update_neuron_history_for_learner
 )
 
 ### GLOBAL VARIABLES ###
@@ -52,18 +42,8 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-
-    y_val=None,
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
+    W=None,
     dtype=np.float32
 ):
     """
@@ -71,110 +51,35 @@ def fit(
 
     fit Args:
 
-        x_train (
+        x_train (aray-like[num]): List or numarray of input data.
 
-        y_train (
-
-        val (None or True): validation in training process ? None or True default: None (optional)
-
-        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+        y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
 
         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-
-
-        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-        show_training (bool, str): True or None default: None (optional)
-
-        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-        train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
-
-        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
+        W (numpy.ndarray): If you want to re-continue or update model
 
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-        numpyarray
+        numpyarray: (Weight matrix).
     """
 
-    # Pre-
-
-    x_train = x_train.astype(dtype, copy=False)
-
-    if train_bar and val:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
-    elif train_bar and val == False:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
-
+    # Pre-check
+
     if len(x_train) != len(y_train):
         raise ValueError("x_train and y_train must have the same length.")
 
-
-        x_val, y_val = x_train, y_train
-
-    elif val and (x_val is not None and y_val is not None):
-        x_val = x_val.astype(dtype, copy=False)
-        y_val = y_val.astype(dtype, copy=False)
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TIME POTENTIATION WEIGHT
-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TIME POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False).ravel()
-        y_decoded = decode_one_hot(y_train[index])
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
-                val_list.append(val_acc)
-
-        # Visualization updates
-        if show_training:
-            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-            if decision_boundary_status:
-                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-        if neurons_history:
-            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    # Finalize visualization
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
+    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-
-
-        show()
+    x_train = apply_activation(x_train, activation_potentiation)
+    LTPW += y_train.T @ x_train
 
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False,
+def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+            neural_web_history=False, show_current_activations=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
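The net effect of this hunk: `fit` is no longer a per-sample STPW/LTPW loop with optional live visualization. It reduces to one vectorized accumulation, `LTPW += y_train.T @ x_train` over activated inputs, optionally seeded from an existing `W` (which is how training can be continued). A minimal standalone sketch of the same computation, assuming the identity ('linear') activation and using max-abs scaling as a stand-in for the package's `normalization` helper (whose exact behavior lives in `data_operations`):

```python
import numpy as np

def fit_sketch(x_train, y_train, W=None, dtype=np.float32):
    """One-shot PLAN-style fit: each class row accumulates the sum of its samples.

    x_train: (n_samples, n_features); y_train: one-hot, (n_samples, n_classes).
    W is an optional starting weight matrix, e.g. the result of a previous call.
    """
    x = x_train.astype(dtype, copy=False)            # 'linear' activation: identity
    if W is None:
        W = np.zeros((y_train.shape[1], x.shape[1]), dtype=dtype)
    W += y_train.T.astype(dtype) @ x                 # vectorized class-wise accumulation
    return W / np.max(np.abs(W))                     # stand-in for data_operations.normalization

# Tiny smoke test: two classes, three features
x = np.array([[1., 0., 2.], [0., 1., 1.]])
y = np.array([[1, 0], [0, 1]])
print(fit_sketch(x, y).shape)  # (2, 3)
```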
@@ -218,9 +123,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
         batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-
+        pop_size (int, optional): Population size of each generation. Default: count of activation functions
 
-
+        early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
 
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
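The `batch_size` docstring above describes fractional batching: each genome is scored on a random subset rather than the full training set. An illustrative re-implementation of those semantics (this is not the package's `data_operations.batcher`, whose details may differ; it only shows the intended behavior):

```python
import numpy as np

def batcher_sketch(x, y, batch_size=1):
    """Fractional batcher sketch: batch_size is a fraction of the data set.

    batch_size=1 returns the full set; 0.08 returns a random ~8% subset.
    """
    if batch_size == 1:
        return x, y
    n = max(1, int(len(x) * batch_size))
    idx = np.random.choice(len(x), size=n, replace=False)  # sample without replacement
    return x[idx], y[idx]
```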
@@ -253,14 +158,19 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     data = 'Train'
 
-
+    except_this = ['spiral', 'circular']
+    activation_potentiation = [item for item in all_activations() if item not in except_this]
     activation_potentiation_len = len(activation_potentiation)
 
     # Pre-checks
 
+    if pop_size is None: pop_size = activation_potentiation_len
+
     x_train = x_train.astype(dtype, copy=False)
     y_train = optimize_labels(y_train, cuda=False)
 
+    if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+
     if gen is None:
         gen = activation_potentiation_len
 
@@ -289,16 +199,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
 
-    if fit_start is False:
-        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=
+    if fit_start is False or pop_size > activation_potentiation_len:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
 
         if start_this_act is not None and start_this_W is not None:
             weight_pop[0] = start_this_W
             act_pop[0] = start_this_act
 
     else:
-        weight_pop = []
-        act_pop = []
+        weight_pop = [0] * pop_size
+        act_pop = [0] * pop_size
 
     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -308,16 +218,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         progress.last_print_n = 0
         progress.update(0)
 
-        for j in range(
+        for j in range(pop_size):
 
             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
-            if fit_start is True and i == 0:
-                act_pop
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1],
-                weight_pop
+            if fit_start is True and i == 0 and j < activation_potentiation_len:
+                act_pop[j] = activation_potentiation[j]
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
+                weight_pop[j] = W
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j],
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]
 
             if strategy == 'accuracy': target_pop.append(acc)
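The generation-0 seeding introduced here gives each of the first `activation_potentiation_len` genomes exactly one candidate activation before the genetic optimizer takes over. One detail worth noting: the old code built `act_pop` with `append`, so `act_pop[-1]` was the genome just added; with the new pre-allocated `[0] * pop_size` lists, `act_pop[-1]` appears to refer to the last slot rather than the current genome `act_pop[j]`. A hypothetical standalone form of the seeding step (names mirror the diff; `fit_fn` is passed in so the sketch is self-contained, and it uses the per-genome `act_pop[j]`):

```python
def seed_generation_zero(x, y, activation_potentiation, pop_size, fit_fn, dtype):
    """Generation 0: one genome per single activation function.

    Genomes beyond len(activation_potentiation) keep whatever
    define_genomes() assigned them (when pop_size exceeds the list).
    """
    weight_pop = [0] * pop_size   # pre-allocated, as in the new code
    act_pop = [0] * pop_size
    for j in range(pop_size):
        if j < len(activation_potentiation):
            act_pop[j] = activation_potentiation[j]
            # per-genome activation: act_pop[j], not act_pop[-1]
            weight_pop[j] = fit_fn(x, y, activation_potentiation=act_pop[j], dtype=dtype)
    return weight_pop, act_pop
```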
@@ -351,6 +261,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 best_acc = acc
                 best_weights = np.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model
 
                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
@@ -401,7 +312,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target accuracy
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
+            train_model = evaluate(x_train, y_train, W=best_weights,
                                    activation_potentiation=final_activations, dtype=dtype)
 
             if loss == 'categorical_crossentropy':
@@ -412,8 +323,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy
-            print(f'Train Loss
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Train Loss: ', train_loss, '\n')
 
             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -423,7 +334,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target loss
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
+            train_model = evaluate(x_train, y_train, W=best_weights,
                                    activation_potentiation=final_activations, dtype=dtype)
 
             if loss == 'categorical_crossentropy':
@@ -434,8 +345,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy
-            print(f'Train Loss
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Train Loss: ', train_loss, '\n')
 
             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -454,7 +365,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
+                train_model = evaluate(x_train, y_train, W=best_weights,
                                        activation_potentiation=final_activations, dtype=dtype)
 
                 if loss == 'categorical_crossentropy':
@@ -465,8 +376,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                           y_pred_batch=train_model[get_preds_softmax()])
 
                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'Train Loss
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')
 
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -475,7 +386,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Final evaluation
    progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights,
+    train_model = evaluate(x_train, y_train, W=best_weights,
                            activation_potentiation=final_activations, dtype=dtype)
 
    if loss == 'categorical_crossentropy':
@@ -484,144 +395,38 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
    print('\nActivations: ', final_activations)
-    print(f'Train Accuracy
-    print(f'Train Loss
+    print(f'Train Accuracy:', train_model[get_acc()])
+    print(f'Train Loss: ', train_loss, '\n')
 
    # Display final visualizations
    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations
 
 
-
-def feed_forward(
-    Input,         # list[num]: Input data.
-    w,             # num: Weight matrix of the neural network.
-    is_training,   # bool: Flag indicating if the function is called during training (True or False).
-    activation_potentiation,
-    Class='?',     # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-    LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = np.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-
-
 def evaluate(
-    x_test,
-    y_test,
-    W,
-    activation_potentiation=['linear']
-    loading_bar_status=True,  # Optionally show loading bar.
-    show_metrics=None,        # Optionally show metrics.
-    dtype=np.float32
+    x_test,
+    y_test,
+    W,
+    activation_potentiation=['linear']
 ) -> tuple:
    """
    Evaluates the neural network model using the given test data.
 
    Args:
-        x_test (np.ndarray): Test
-
-        y_test (np.ndarray): Test labels. one-hot encoded.
-
-        W (list[np.ndarray]): List of neural network weight matrices.
-
-        activation_potentiation (list): List of activation functions.
-
-        loading_bar_status (bool): Option to show a loading bar (optional).
-
-        show_metrics (bool): Option to show metrics (optional).
+        x_test (np.ndarray): Test data.
 
-
+        y_test (np.ndarray): Test labels (one-hot encoded).
 
+        W (np.ndarray): Neural net weight matrix.
+
+        activation_potentiation (list): Activation list. Default = ['linear'].
+
    Returns:
-        tuple:
+        tuple: Model (list).
    """
-    # Pre-checks
-
-    x_test = x_test.astype(dtype, copy=False)
-
-    if len(y_test[0]) < 256:
-        if y_test.dtype != np.uint8:
-            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-    elif len(y_test[0]) <= 32767:
-        if y_test.dtype != np.uint16:
-            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_test.dtype != np.uint32:
-            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
-
-    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = np.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = x_test[inpIndex].ravel()
-
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = np.argmax(y_test[inpIndex])
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = np.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()
 
-
-
-
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    softmax_preds = np.exp(result) / np.sum(np.exp(result), axis=1, keepdims=True); accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
 
-    return W,
+    return W, None, accuracy, None, None, softmax_preds