pyerualjetwork 4.3.8.dev15__py3-none-any.whl → 4.3.9b0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +2 -2
- pyerualjetwork/activation_functions_cuda.py +63 -114
- pyerualjetwork/data_operations_cuda.py +1 -1
- pyerualjetwork/model_operations.py +14 -14
- pyerualjetwork/model_operations_cuda.py +16 -17
- pyerualjetwork/plan.py +87 -268
- pyerualjetwork/plan_cuda.py +82 -276
- pyerualjetwork/planeat.py +12 -44
- pyerualjetwork/planeat_cuda.py +9 -45
- pyerualjetwork/visualizations.py +29 -26
- pyerualjetwork/visualizations_cuda.py +19 -20
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9b0.dist-info}/METADATA +2 -19
- pyerualjetwork-4.3.9b0.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.9b0.dist-info/top_level.txt +1 -0
- pyerualjetwork-4.3.8.dev15.dist-info/RECORD +0 -45
- pyerualjetwork-4.3.8.dev15.dist-info/top_level.txt +0 -2
- pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork_afterburner/activation_functions.py +0 -290
- pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
- pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
- pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork_afterburner/model_operations.py +0 -408
- pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
- pyerualjetwork_afterburner/parallel.py +0 -118
- pyerualjetwork_afterburner/plan.py +0 -432
- pyerualjetwork_afterburner/plan_cuda.py +0 -441
- pyerualjetwork_afterburner/planeat.py +0 -793
- pyerualjetwork_afterburner/planeat_cuda.py +0 -752
- pyerualjetwork_afterburner/ui.py +0 -22
- pyerualjetwork_afterburner/visualizations.py +0 -823
- pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9b0.dist-info}/WHEEL +0 -0
pyerualjetwork/plan.py
CHANGED
```diff
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import numpy as np
-import math
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations import normalization,
+from .data_operations import normalization, batcher
 from .loss_functions import binary_crossentropy, categorical_crossentropy
-from .activation_functions import apply_activation,
+from .activation_functions import apply_activation, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
 from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner,
-    show
+    update_neuron_history_for_learner
 )
 
 ### GLOBAL VARIABLES ###
@@ -52,18 +42,9 @@ bar_format_learner = loading_bars()[1]
 
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-
-
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
+    W=None,
+    normalization=False,
     dtype=np.float32
 ):
     """
@@ -71,110 +52,39 @@ def fit(
 
     fit Args:
 
-        x_train (
+        x_train (aray-like[num]): List or numarray of input data.
 
-        y_train (
-
-        val (None or True): validation in training process ? None or True default: None (optional)
-
-        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+        y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
 
         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-
-
-        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-        show_training (bool, str): True or None default: None (optional)
-
-        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-        train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
+        W (numpy.ndarray): If you want to re-continue or update model
 
-
+        normalization (bool, optional): Normalization may solves overflow problem. Default: False
 
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-        numpyarray
+        numpyarray: (Weight matrix).
     """
 
-    # Pre-
+    # Pre-check
+
+    if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
 
-
+    weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-    if
-
-
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
-
-    if len(x_train) != len(y_train):
-        raise ValueError("x_train and y_train must have the same length.")
-
-    if val and (x_val is None and y_val is None):
-        x_val, y_val = x_train, y_train
-
-    elif val and (x_val is not None and y_val is not None):
-        x_val = x_val.astype(dtype, copy=False)
-        y_val = y_val.astype(dtype, copy=False)
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TIME POTENTIATION WEIGHT
-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TIME POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False).ravel()
-        y_decoded = decode_one_hot(y_train[index])
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
-                val_list.append(val_acc)
-
-        # Visualization updates
-        if show_training:
-            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-            if decision_boundary_status:
-                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-        if neurons_history:
-            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    # Finalize visualization
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
+    if normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+    elif normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+    else: raise ValueError('normalization parameter only be True or False')
 
-
-        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-        show()
+    weight += y_train.T @ x_train
 
-    return normalization(
+    return normalization(weight, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False,
+def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+            neural_web_history=False, show_current_activations=False, normalization=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
```
```diff
@@ -218,9 +128,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
         batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-
+        pop_size (int, optional): Population size of each generation. Default: count of activation functions
 
-
+        early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
 
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
@@ -253,14 +163,19 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     data = 'Train'
 
-
+    except_this = ['spiral', 'circular']
+    activation_potentiation = [item for item in all_activations() if item not in except_this]
     activation_potentiation_len = len(activation_potentiation)
 
     # Pre-checks
 
+    if pop_size is None: pop_size = activation_potentiation_len
+
     x_train = x_train.astype(dtype, copy=False)
     y_train = optimize_labels(y_train, cuda=False)
 
+    if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+
     if gen is None:
         gen = activation_potentiation_len
 
```
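Two behavioral notes on the hunk above: the candidate pool is now derived inside `learner` by filtering `all_activations()`, and the new `pop_size` defaults to the pool's length and may not be smaller than it. A self-contained sketch of that handling (the activation names below are placeholders, not the library's actual list):

```python
def resolve_population(all_activation_names, pop_size=None):
    """Sketch of learner()'s new activation-pool / pop_size checks."""
    except_this = ['spiral', 'circular']          # excluded from the search pool
    pool = [a for a in all_activation_names if a not in except_this]
    if pop_size is None:
        pop_size = len(pool)                      # default: one genome per candidate activation
    if pop_size < len(pool):
        raise ValueError(f"pop_size must be higher or equal to {len(pool)}")
    return pool, pop_size

# Placeholder names for illustration:
pool, pop_size = resolve_population(['linear', 'tanh', 'sine', 'spiral', 'circular'])
```

When `pop_size` exceeds the pool size, the surplus genomes come from `define_genomes` instead (see the `fit_start is False or pop_size > activation_potentiation_len` branch in the next hunks).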
```diff
@@ -273,9 +188,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Initialize progress bar
     if batch_size == 1:
-        ncols =
+        ncols = 78
     else:
-        ncols =
+        ncols = 49
 
     # Initialize variables
     best_acc = 0
@@ -289,16 +204,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
 
-    if fit_start is False:
-        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=
+    if fit_start is False or pop_size > activation_potentiation_len:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
 
         if start_this_act is not None and start_this_W is not None:
             weight_pop[0] = start_this_W
             act_pop[0] = start_this_act
 
     else:
-        weight_pop = []
-        act_pop = []
+        weight_pop = [0] * pop_size
+        act_pop = [0] * pop_size
 
     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -308,16 +223,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         progress.last_print_n = 0
         progress.update(0)
 
-        for j in range(
+        for j in range(pop_size):
 
             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
-            if fit_start is True and i == 0:
-                act_pop
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[
-                weight_pop
+            if fit_start is True and i == 0 and j < activation_potentiation_len:
+                act_pop[j] = activation_potentiation[j]
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], normalization=normalization, dtype=dtype)
+                weight_pop[j] = W
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j],
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]
 
             if strategy == 'accuracy': target_pop.append(acc)
@@ -351,15 +266,14 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 best_acc = acc
                 best_weights = np.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model
 
                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
                 if batch_size == 1:
-                    postfix_dict[f"{data} Accuracy"] = best_acc
-
-                    postfix_dict[f"{data} Batch Accuracy"] = acc
-                    progress.set_postfix(postfix_dict)
+                    postfix_dict[f"{data} Accuracy"] = np.round(best_acc, 4)
+                    progress.set_postfix(postfix_dict)
 
                 if show_current_activations:
                     print(f", Current Activations={final_activations}", end='')
@@ -370,11 +284,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                     train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
 
                 if batch_size == 1:
-                    postfix_dict[f"{data} Loss"] = train_loss
-
-                else:
-                    postfix_dict[f"{data} Batch Loss"] = train_loss
-                    progress.set_postfix(postfix_dict)
+                    postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+                    progress.set_postfix(postfix_dict)
                 best_loss = train_loss
 
                 # Update visualizations during training
@@ -401,7 +312,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             # Check target accuracy
             if target_acc is not None and best_acc >= target_acc:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
+                train_model = evaluate(x_train, y_train, W=best_weights,
                                        activation_potentiation=final_activations, dtype=dtype)
 
                 if loss == 'categorical_crossentropy':
@@ -412,8 +323,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                          y_pred_batch=train_model[get_preds_softmax()])
 
                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'Train Loss
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')
 
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -423,7 +334,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             # Check target loss
             if target_loss is not None and best_loss <= target_loss:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
+                train_model = evaluate(x_train, y_train, W=best_weights,
                                        activation_potentiation=final_activations, dtype=dtype)
 
                 if loss == 'categorical_crossentropy':
@@ -434,8 +345,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                          y_pred_batch=train_model[get_preds_softmax()])
 
                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'Train Loss
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')
 
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -447,6 +358,20 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
 
+        if batch_size != 1:
+            train_model = evaluate(x_train, y_train, best_weights, final_activations)
+
+            if loss == 'categorical_crossentropy':
+                train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+            else:
+                train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                 y_pred_batch=train_model[get_preds_softmax()])
+
+            postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
+            postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+            progress.set_postfix(postfix_dict)
+
         weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []
 
@@ -454,7 +379,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
+                train_model = evaluate(x_train, y_train, W=best_weights,
                                        activation_potentiation=final_activations, dtype=dtype)
 
                 if loss == 'categorical_crossentropy':
@@ -465,8 +390,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                          y_pred_batch=train_model[get_preds_softmax()])
 
                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'Train Loss
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')
 
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -475,7 +400,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Final evaluation
    progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights,
+    train_model = evaluate(x_train, y_train, W=best_weights,
                            activation_potentiation=final_activations, dtype=dtype)
 
     if loss == 'categorical_crossentropy':
```
```diff
@@ -484,144 +409,38 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy
-    print(f'Train Loss
+    print(f'Train Accuracy:', train_model[get_acc()])
+    print(f'Train Loss: ', train_loss, '\n')
 
     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations
 
 
-
-def feed_forward(
-    Input,               # list[num]: Input data.
-    w,                   # num: Weight matrix of the neural network.
-    is_training,         # bool: Flag indicating if the function is called during training (True or False).
-    activation_potentiation,
-    Class='?',           # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-    LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = np.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-
-
 def evaluate(
-    x_test,
-    y_test,
-    W,
-    activation_potentiation=['linear'],
-    loading_bar_status=True,  # Optionally show loading bar.
-    show_metrics=None,  # Optionally show metrics.
-    dtype=np.float32
+    x_test,
+    y_test,
+    W,
+    activation_potentiation=['linear']
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.
 
     Args:
-        x_test (np.ndarray): Test
-
-        y_test (np.ndarray): Test labels. one-hot encoded.
-
-        W (list[np.ndarray]): List of neural network weight matrices.
-
-        activation_potentiation (list): List of activation functions.
-
-        loading_bar_status (bool): Option to show a loading bar (optional).
-
-        show_metrics (bool): Option to show metrics (optional).
+        x_test (np.ndarray): Test data.
 
-
+        y_test (np.ndarray): Test labels (one-hot encoded).
 
+        W (np.ndarray): Neural net weight matrix.
+
+        activation_potentiation (list): Activation list. Default = ['linear'].
+
     Returns:
-        tuple:
+        tuple: Model (list).
     """
-    # Pre-checks
-
-    x_test = x_test.astype(dtype, copy=False)
-
-    if len(y_test[0]) < 256:
-        if y_test.dtype != np.uint8:
-            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-    elif len(y_test[0]) <= 32767:
-        if y_test.dtype != np.uint16:
-            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_test.dtype != np.uint32:
-            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
-
-    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = np.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = x_test[inpIndex].ravel()
-
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = np.argmax(y_test[inpIndex])
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = np.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()
 
-
-
-
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    softmax_preds = np.exp(result) / np.sum(np.exp(result), axis=1, keepdims=True); accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
 
-    return W,
+    return W, None, accuracy, None, None, softmax_preds
```