pyerualjetwork 4.3.8.dev15__py3-none-any.whl → 4.3.9b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +2 -2
- pyerualjetwork/activation_functions_cuda.py +63 -114
- pyerualjetwork/data_operations_cuda.py +1 -1
- pyerualjetwork/model_operations.py +14 -14
- pyerualjetwork/model_operations_cuda.py +16 -17
- pyerualjetwork/plan.py +89 -268
- pyerualjetwork/plan_cuda.py +84 -276
- pyerualjetwork/planeat.py +12 -44
- pyerualjetwork/planeat_cuda.py +9 -45
- pyerualjetwork/visualizations.py +29 -26
- pyerualjetwork/visualizations_cuda.py +19 -20
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9b1.dist-info}/METADATA +2 -19
- pyerualjetwork-4.3.9b1.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.9b1.dist-info/top_level.txt +1 -0
- pyerualjetwork-4.3.8.dev15.dist-info/RECORD +0 -45
- pyerualjetwork-4.3.8.dev15.dist-info/top_level.txt +0 -2
- pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork_afterburner/activation_functions.py +0 -290
- pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
- pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
- pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork_afterburner/model_operations.py +0 -408
- pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
- pyerualjetwork_afterburner/parallel.py +0 -118
- pyerualjetwork_afterburner/plan.py +0 -432
- pyerualjetwork_afterburner/plan_cuda.py +0 -441
- pyerualjetwork_afterburner/planeat.py +0 -793
- pyerualjetwork_afterburner/planeat_cuda.py +0 -752
- pyerualjetwork_afterburner/ui.py +0 -22
- pyerualjetwork_afterburner/visualizations.py +0 -823
- pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9b1.dist-info}/WHEEL +0 -0
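
The plan.py and plan_cuda.py changes below are the core of this release: the incremental per-sample training loop (STPW/LTPW, feed_forward, LTD) is replaced by a single vectorized weight update, and evaluate() shrinks to four arguments. As a rough orientation before reading the diff, here is a hedged sketch of the new call shape; the sample arrays are invented, and get_acc() is the index helper the module itself imports from model_operations:

import numpy as np
from pyerualjetwork import plan
from pyerualjetwork.model_operations import get_acc

x_train = np.random.rand(100, 4).astype(np.float32)                  # hypothetical data
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]  # one-hot labels

W = plan.fit(x_train, y_train, activation_potentiation=['linear'])   # new keyword args: W=None, normalization=False
model = plan.evaluate(x_train, y_train, W, activation_potentiation=['linear'])
print('accuracy:', model[get_acc()])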
pyerualjetwork/plan.py
CHANGED
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import numpy as np
-import math
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations import normalization,
+from .data_operations import normalization, batcher
 from .loss_functions import binary_crossentropy, categorical_crossentropy
-from .activation_functions import apply_activation,
+from .activation_functions import apply_activation, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
 from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner,
-    show
+    update_neuron_history_for_learner
 )
 
 ### GLOBAL VARIABLES ###
@@ -52,18 +42,9 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-
-
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
+    W=None,
+    normalization=False,
     dtype=np.float32
 ):
     """
@@ -71,110 +52,39 @@ def fit(
 
     fit Args:
 
-    x_train (
+    x_train (aray-like[num]): List or numarray of input data.
 
-    y_train (
-
-    val (None or True): validation in training process ? None or True default: None (optional)
-
-    val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+    y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
 
     activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-
-
-    y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-    show_training (bool, str): True or None default: None (optional)
-
-    LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-    interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-    decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-    train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-    auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
+    W (numpy.ndarray): If you want to re-continue or update model
 
-
+    normalization (bool, optional): Normalization may solves overflow problem. Default: False
 
     dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-    numpyarray
+    numpyarray: (Weight matrix).
     """
 
-    # Pre-
+    # Pre-check
+
+    if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
 
-
+    weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-    if
-
-
-    train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
-
-    if len(x_train) != len(y_train):
-        raise ValueError("x_train and y_train must have the same length.")
-
-    if val and (x_val is None and y_val is None):
-        x_val, y_val = x_train, y_train
-
-    elif val and (x_val is not None and y_val is not None):
-        x_val = x_val.astype(dtype, copy=False)
-        y_val = y_val.astype(dtype, copy=False)
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TIME POTENTIATION WEIGHT
-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TIME POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False).ravel()
-        y_decoded = decode_one_hot(y_train[index])
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
-                val_list.append(val_acc)
-
-        # Visualization updates
-        if show_training:
-            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-            if decision_boundary_status:
-                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-        if neurons_history:
-            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    # Finalize visualization
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
+    if normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+    elif normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+    else: raise ValueError('normalization parameter only be True or False')
 
-
-        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-        show()
+    weight += y_train.T @ x_train
 
-    return normalization(
+    return normalization(weight, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False,
+def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+            neural_web_history=False, show_current_activations=False, normalization=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
@@ -218,14 +128,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-
+    pop_size (int, optional): Population size of each generation. Default: count of activation functions
 
-
+    early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
 
     show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
     show_history (bool, optional): If True, displays the training history after optimization. Default is False.
 
+    normalization (bool, optional): Normalization may solves overflow problem. Default: False
+
     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
     interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
@@ -253,14 +165,19 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     data = 'Train'
 
-
+    except_this = ['spiral', 'circular']
+    activation_potentiation = [item for item in all_activations() if item not in except_this]
     activation_potentiation_len = len(activation_potentiation)
 
     # Pre-checks
 
+    if pop_size is None: pop_size = activation_potentiation_len
+
     x_train = x_train.astype(dtype, copy=False)
     y_train = optimize_labels(y_train, cuda=False)
 
+    if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+
     if gen is None:
         gen = activation_potentiation_len
 
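Read together, the three pop_size lines added above define one contract; restated in one place (a paraphrase, not library code, assuming all_activations() returns the list of activation names):

acts = [a for a in all_activations() if a not in ['spiral', 'circular']]
if pop_size is None: pop_size = len(acts)  # default: one genome per usable activation
if pop_size < len(acts): raise ValueError(f"pop_size must be higher or equal to {len(acts)}")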
@@ -273,9 +190,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Initialize progress bar
     if batch_size == 1:
-        ncols =
+        ncols = 78
     else:
-        ncols =
+        ncols = 49
 
     # Initialize variables
     best_acc = 0
@@ -289,16 +206,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
 
-    if fit_start is False:
-        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=
+    if fit_start is False or pop_size > activation_potentiation_len:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
 
         if start_this_act is not None and start_this_W is not None:
             weight_pop[0] = start_this_W
             act_pop[0] = start_this_act
 
     else:
-        weight_pop = []
-        act_pop = []
+        weight_pop = [0] * pop_size
+        act_pop = [0] * pop_size
 
     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -308,16 +225,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
         progress.last_print_n = 0
         progress.update(0)
 
-        for j in range(
+        for j in range(pop_size):
 
             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
-            if fit_start is True and i == 0:
-                act_pop
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[
-                weight_pop
+            if fit_start is True and i == 0 and j < activation_potentiation_len:
+                act_pop[j] = activation_potentiation[j]
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], normalization=normalization, dtype=dtype)
+                weight_pop[j] = W
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j],
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]
 
             if strategy == 'accuracy': target_pop.append(acc)
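The generation-0 branch above seeds the population deterministically when fit_start is True: genome j gets activation j and a weight fitted on one batch for j < activation_potentiation_len, while any surplus slots keep the weights define_genomes() gave them. A hedged restatement, with fit_fn and acts as stand-in names:

def seed_generation_zero(acts, pop_size, fit_fn, x_batch, y_batch, weight_pop, act_pop):
    # Mirrors the j-loop's `fit_start is True and i == 0` case.
    for j in range(pop_size):
        if j < len(acts):
            act_pop[j] = acts[j]
            weight_pop[j] = fit_fn(x_batch, y_batch, activation_potentiation=acts[j])
        # else: slot j keeps its define_genomes() initialization
    return weight_pop, act_pop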
@@ -351,15 +268,14 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 best_acc = acc
                 best_weights = np.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model
 
                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
                 if batch_size == 1:
-                    postfix_dict[f"{data} Accuracy"] = best_acc
-
-                    postfix_dict[f"{data} Batch Accuracy"] = acc
-                    progress.set_postfix(postfix_dict)
+                    postfix_dict[f"{data} Accuracy"] = np.round(best_acc, 4)
+                    progress.set_postfix(postfix_dict)
 
                 if show_current_activations:
                     print(f", Current Activations={final_activations}", end='')
@@ -370,11 +286,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
 
             if batch_size == 1:
-                postfix_dict[f"{data} Loss"] = train_loss
-
-            else:
-                postfix_dict[f"{data} Batch Loss"] = train_loss
-            progress.set_postfix(postfix_dict)
+                postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+                progress.set_postfix(postfix_dict)
             best_loss = train_loss
 
         # Update visualizations during training
@@ -401,7 +314,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target accuracy
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
+            train_model = evaluate(x_train, y_train, W=best_weights,
                                    activation_potentiation=final_activations, dtype=dtype)
 
             if loss == 'categorical_crossentropy':
|
|
412
325
|
y_pred_batch=train_model[get_preds_softmax()])
|
413
326
|
|
414
327
|
print('\nActivations: ', final_activations)
|
415
|
-
print(f'Train Accuracy
|
416
|
-
print(f'Train Loss
|
328
|
+
print(f'Train Accuracy:', train_model[get_acc()])
|
329
|
+
print(f'Train Loss: ', train_loss, '\n')
|
417
330
|
|
418
331
|
# Display final visualizations
|
419
332
|
display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
|
@@ -423,7 +336,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
|
|
423
336
|
# Check target loss
|
424
337
|
if target_loss is not None and best_loss <= target_loss:
|
425
338
|
progress.close()
|
426
|
-
train_model = evaluate(x_train, y_train, W=best_weights,
|
339
|
+
train_model = evaluate(x_train, y_train, W=best_weights,
|
427
340
|
activation_potentiation=final_activations, dtype=dtype)
|
428
341
|
|
429
342
|
if loss == 'categorical_crossentropy':
|
@@ -434,8 +347,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
|
|
434
347
|
y_pred_batch=train_model[get_preds_softmax()])
|
435
348
|
|
436
349
|
print('\nActivations: ', final_activations)
|
437
|
-
print(f'Train Accuracy
|
438
|
-
print(f'Train Loss
|
350
|
+
print(f'Train Accuracy:', train_model[get_acc()])
|
351
|
+
print(f'Train Loss: ', train_loss, '\n')
|
439
352
|
|
440
353
|
# Display final visualizations
|
441
354
|
display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
|
@@ -447,6 +360,20 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
 
+        if batch_size != 1:
+            train_model = evaluate(x_train, y_train, best_weights, final_activations)
+
+            if loss == 'categorical_crossentropy':
+                train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+            else:
+                train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                 y_pred_batch=train_model[get_preds_softmax()])
+
+            postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
+            postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+            progress.set_postfix(postfix_dict)
+
         weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []
 
@@ -454,7 +381,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
+                train_model = evaluate(x_train, y_train, W=best_weights,
                                        activation_potentiation=final_activations, dtype=dtype)
 
                 if loss == 'categorical_crossentropy':
@@ -465,8 +392,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                           y_pred_batch=train_model[get_preds_softmax()])
 
                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'Train Loss
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')
 
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -475,7 +402,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Final evaluation
    progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights,
+    train_model = evaluate(x_train, y_train, W=best_weights,
                            activation_potentiation=final_activations, dtype=dtype)
 
     if loss == 'categorical_crossentropy':
@@ -484,144 +411,38 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy
-    print(f'Train Loss
+    print(f'Train Accuracy:', train_model[get_acc()])
+    print(f'Train Loss: ', train_loss, '\n')
 
     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations
 
 
-
-def feed_forward(
-    Input,                   # list[num]: Input data.
-    w,                       # num: Weight matrix of the neural network.
-    is_training,             # bool: Flag indicating if the function is called during training (True or False).
-    activation_potentiation,
-    Class='?',               # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-    LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = np.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-
-
 def evaluate(
-    x_test,
-    y_test,
-    W,
-    activation_potentiation=['linear'],
-    loading_bar_status=True,  # Optionally show loading bar.
-    show_metrics=None,        # Optionally show metrics.
-    dtype=np.float32
+    x_test,
+    y_test,
+    W,
+    activation_potentiation=['linear']
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.
 
     Args:
-        x_test (np.ndarray): Test
-
-        y_test (np.ndarray): Test labels. one-hot encoded.
-
-        W (list[np.ndarray]): List of neural network weight matrices.
-
-        activation_potentiation (list): List of activation functions.
-
-        loading_bar_status (bool): Option to show a loading bar (optional).
-
-        show_metrics (bool): Option to show metrics (optional).
+        x_test (np.ndarray): Test data.
 
-
+        y_test (np.ndarray): Test labels (one-hot encoded).
 
+        W (np.ndarray): Neural net weight matrix.
+
+        activation_potentiation (list): Activation list. Default = ['linear'].
+
     Returns:
-        tuple:
+        tuple: Model (list).
     """
-    # Pre-checks
-
-    x_test = x_test.astype(dtype, copy=False)
-
-    if len(y_test[0]) < 256:
-        if y_test.dtype != np.uint8:
-            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-    elif len(y_test[0]) <= 32767:
-        if y_test.dtype != np.uint16:
-            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_test.dtype != np.uint32:
-            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
-
-    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = np.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = x_test[inpIndex].ravel()
-
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = np.argmax(y_test[inpIndex])
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = np.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()
 
-
-
-
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    softmax_preds = np.exp(result) / np.sum(np.exp(result), axis=1, keepdims=True); accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
 
-    return W,
+    return W, None, accuracy, None, None, softmax_preds