pyerualjetwork 4.2.9b7__py3-none-any.whl → 4.3.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-4.2.9b7.dist-info → pyerualjetwork-4.3.0.1.dist-info}/METADATA +2 -1
- pyerualjetwork-4.3.0.1.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.0.1.dist-info/top_level.txt +1 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/__init__.py +1 -1
- pyerualjetwork-jetstorm/activation_functions.py +291 -0
- pyerualjetwork-jetstorm/activation_functions_cuda.py +290 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/data_operations.py +2 -3
- {pyerualjetwork → pyerualjetwork-jetstorm}/model_operations.py +14 -14
- {pyerualjetwork → pyerualjetwork-jetstorm}/model_operations_cuda.py +16 -17
- {pyerualjetwork → pyerualjetwork-jetstorm}/plan.py +46 -248
- {pyerualjetwork → pyerualjetwork-jetstorm}/plan_cuda.py +44 -263
- {pyerualjetwork → pyerualjetwork-jetstorm}/planeat.py +14 -47
- {pyerualjetwork → pyerualjetwork-jetstorm}/planeat_cuda.py +11 -48
- pyerualjetwork/activation_functions.py +0 -343
- pyerualjetwork/activation_functions_cuda.py +0 -341
- pyerualjetwork-4.2.9b7.dist-info/RECORD +0 -24
- pyerualjetwork-4.2.9b7.dist-info/top_level.txt +0 -1
- {pyerualjetwork-4.2.9b7.dist-info → pyerualjetwork-4.3.0.1.dist-info}/WHEEL +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/data_operations_cuda.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/help.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/loss_functions.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/loss_functions_cuda.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/memory_operations.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/metrics.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/metrics_cuda.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/ui.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/visualizations.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/visualizations_cuda.py +0 -0
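Note: the diff viewer renders the hunks below without per-hunk file headers. Judging from the `cupy` imports and the `fit`/`learner`/`evaluate` definitions, the first group of hunks appears to belong to `{pyerualjetwork → pyerualjetwork-jetstorm}/plan_cuda.py`, and the later `evolver`/`cross_over` hunks to `{pyerualjetwork → pyerualjetwork-jetstorm}/planeat.py`; this is an inference from the summary above, not from explicit headers.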
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
-import math
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations_cuda import normalization
+from .data_operations_cuda import normalization
 from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
-from .activation_functions_cuda import apply_activation,
+from .activation_functions_cuda import apply_activation, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
 from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
 from .visualizations_cuda import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner,
-    show
+    update_neuron_history_for_learner
 )
 
 ### GLOBAL VARIABLES ###
@@ -52,144 +42,42 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-
-
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
-    dtype=cp.float32,
-    memory='gpu'
+    W=None,
+    dtype=cp.float32
 ):
     """
-    Creates a model to fitting data
+    Creates a model to fitting data.,
 
     fit Args:
 
-        x_train (
+        x_train (aray-like[cupy]): List or cupy array of input data.
 
-        y_train (
-
-        val (None or True): validation in training process ? None or True default: None (optional)
-
-        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+        y_train (aray-like[cupy]): List or cupy array of target labels. (one hot encoded)
 
         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-
-
-
-
-        show_training (bool, str): True or None default: None (optional)
-
-        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-        train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
-
-        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
-
-        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
-        memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
+        W (cupy.ndarray): If you want to re-continue or update model
+
+        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-
+        cupyarray: (Weight matrix).
     """
-    # Pre-
+    # Pre-check
 
-    if
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
-    elif train_bar and val == False:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
+    if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
 
-
-        raise ValueError("x_train and y_train must have the same length.")
-
-    if val and (x_val is None or y_val is None):
-        x_val, y_val = x_train, y_train
-
-    if memory == 'gpu':
-        x_train = transfer_to_gpu(x_train, dtype=dtype)
-        y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
+    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-
-
-        y_val = transfer_to_gpu(y_val, dtype=y_train.dtype)
-
-    elif memory == 'cpu':
-        x_train = transfer_to_cpu(x_train, dtype=dtype)
-        y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
-
-        if val:
-            x_val = transfer_to_cpu(x_val, dtype=dtype)
-            y_val = transfer_to_cpu(y_val, dtype=y_train.dtype)
-
-    else:
-        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TERM POTENTIATION WEIGHT
-    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TERM POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = transfer_to_gpu(inp, dtype=dtype).ravel()
-        y_decoded = decode_one_hot(cp.array(y_train[index], copy=False, dtype=y_train.dtype))
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW, memory=memory)[get_acc()]
-                val_list.append(val_acc)
-
-        # Visualization updates
-        if show_training:
-            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-            if decision_boundary_status:
-                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-        if neurons_history:
-            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
-
-    if neurons_history:
-        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-        show()
+    x_train = apply_activation(x_train, activation_potentiation)
+    LTPW += cp.array(y_train, dtype=optimize_labels(y_train, cuda=True).dtype).T @ x_train
 
     return normalization(LTPW, dtype=dtype)
 
 
 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False,
+            neural_web_history=False, show_current_activations=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
@@ -234,8 +122,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
         early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
 
-        auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
-
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
@@ -339,10 +225,10 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
             if fit_start is True and i == 0:
                 act_pop.append(activation_potentiation[j])
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1],
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
                 weight_pop.append(W)
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j]
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j])
             acc = model[get_acc()]
 
             if strategy == 'accuracy': target_pop.append(acc)
@@ -374,8 +260,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 (strategy == 'recall' and recall_score >= best_recall)):
 
                 best_acc = acc
-                best_weights = weight_pop[j]
-                final_activations = act_pop[j]
+                best_weights = cp.copy(weight_pop[j])
+                final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model
 
                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
@@ -427,7 +314,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                   activation_potentiation=final_activations
+                                   activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -437,8 +324,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy
-            print(f'Train Loss
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Train Loss: ', train_loss, '\n')
 
             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -448,8 +335,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target loss
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
-                                   activation_potentiation=final_activations
+            train_model = evaluate(x_train, y_train, W=best_weights,
+                                   activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -459,8 +346,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                   y_pred_batch=train_model[get_preds_softmax()])
 
             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy
-            print(f'Train Loss
+            print(f'Train Accuracy: ', train_model[get_acc()])
+            print(f'Train Loss: ', train_loss, '\n')
 
             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -479,8 +366,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
-                                       activation_potentiation=final_activations
+                train_model = evaluate(x_train, y_train, W=best_weights,
+                                       activation_potentiation=final_activations)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -490,8 +377,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'Train Loss
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')
 
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -500,8 +387,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Final evaluation
    progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights,
-                           activation_potentiation=final_activations
+    train_model = evaluate(x_train, y_train, W=best_weights,
+                           activation_potentiation=final_activations)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
@@ -509,70 +396,18 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy
-    print(f'Train Loss
+    print(f'Train Accuracy: ', train_model[get_acc()])
+    print(f'Train Loss : ', train_loss, '\n')
 
     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations
 
-
-
-def feed_forward(
-    Input,  # list[num]: Input data.
-    w,  # num: Weight matrix of the neural network.
-    is_training,  # bool: Flag indicating if the function is called during training (True or False).
-    activation_potentiation,
-    Class='?',  # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-    LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = cp.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = cp.dot(w, Input)
-
-        return neural_layer
-
-
 def evaluate(
     x_test,
     y_test,
     W,
-    activation_potentiation=['linear']
-    loading_bar_status=True,
-    show_metrics=False,
-    dtype=cp.float32,
-    memory='gpu'
+    activation_potentiation=['linear']
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.
@@ -582,70 +417,16 @@ def evaluate(
 
     y_test (cp.ndarray): Test labels (one-hot encoded).
 
-    W (
+    W (cp.ndarray): Neural net weight matrix.
 
     activation_potentiation (list): Activation list. Default = ['linear'].
 
-    loading_bar_status (bool): Loading bar (optional). Default = True.
-
-    show_metrics (bool): Visualize metrics ? (optional). Default = False.
-
-    dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
-    memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
-
     Returns:
         tuple: Model (list).
     """
 
-
-
-
-
-    elif memory == 'cpu':
-        x_test = transfer_to_cpu(x_test, dtype=dtype)
-        y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)
-
-    else:
-        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
-
-    predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = cp.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = cp.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = cp.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = transfer_to_gpu(x_test[inpIndex], dtype=dtype).ravel()
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = decode_one_hot(transfer_to_gpu(y_test[inpIndex], dtype=y_test[inpIndex].dtype))
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = cp.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()
-
-    if show_metrics:
-        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=cp.copy(W), activation_potentiation=activation_potentiation)
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    softmax_preds = cp.exp(result) / cp.sum(cp.exp(result), axis=1, keepdims=True); accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
 
-    return W,
+    return W, None, accuracy, None, None, softmax_preds
@@ -15,10 +15,8 @@ ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/
 import numpy as np
 import random
 import math
-import copy
 
 ### LIBRARY IMPORTS ###
-from .plan import feed_forward
 from .data_operations import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
@@ -279,7 +277,7 @@ def evolver(weights,
 
     good_activations = list(activation_potentiations[slice_center:])
     bad_activations = list(activation_potentiations[:slice_center])
-    best_activations =
+    best_activations = good_activations[-1].copy() if isinstance(good_activations[-1], list) else good_activations[-1]
 
 
     ### PLANEAT IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:
@@ -293,10 +291,11 @@ def evolver(weights,
     epsilon = np.finfo(float).eps
 
     child_W = np.copy(bad_weights)
-    child_act = copy
+    child_act = bad_activations.copy()
 
     mutated_W = np.copy(bad_weights)
-    mutated_act = copy
+    mutated_act = bad_activations.copy()
+
 
     for i in range(len(bad_weights)):
 
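With `import copy` removed, the replacement `bad_activations.copy()` is a shallow copy: the outer list is new, but any nested per-genome activation lists are still shared. The `best_activations` hunk above guards against exactly this by copying nested lists explicitly. A small illustration of the difference:

```python
# Shallow vs. nested copies of an activations list
acts = [['relu', 'tanh'], ['linear']]

shallow = acts.copy()          # new outer list, shared inner lists
shallow[0].append('sigmoid')   # mutates acts[0] as well
print(acts[0])                 # ['relu', 'tanh', 'sigmoid']

# Copying the inner list, as the best_activations hunk does, breaks the aliasing
safe = acts[1].copy() if isinstance(acts[1], list) else acts[1]
safe.append('spiral')
print(acts[1])                 # ['linear'] -- unchanged
```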
@@ -400,7 +399,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations
+def evaluate(x_population, weights, activation_potentiations):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -413,62 +412,30 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dty
     activation_potentiations (list or str): A list where each entry represents an activation function
                                             or a potentiation strategy applied to each genome. If only one
                                             activation function is used, this can be a single string.
-    rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
-                              Default is False.
-
-    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
     Returns:
         list: A list of outputs corresponding to each genome in the population after applying the respective
               activation function and weights.
 
-    Notes:
-        - If `rl_mode` is True:
-            - Accepts x_population is a single genom
-            - The inputs are flattened, and the activation function is applied across the single genom.
-
-        - If `rl_mode` is False:
-            - Accepts x_population is a list of genomes
-            - Each genome is processed individually, and the results are stored in the `outputs` list.
-
-        - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
-
     Example:
         ```python
-        outputs = evaluate(x_population, weights, activation_potentiations
+        outputs = evaluate(x_population, weights, activation_potentiations)
         ```
 
     - The function returns a list of outputs after processing the population, where each element corresponds to
       the output for each genome in `x_population`.
-    """
-
-    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
-
-    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
+    """
     ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-    if rl_mode == True:
-        Input = np.array(x_population, copy=False, dtype=dtype)
-        Input = Input.ravel()
-
-        if isinstance(activation_potentiations, str):
-            activation_potentiations = [activation_potentiations]
-
-        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
-
-    else:
-        outputs = [0] * len(x_population)
-        for i, genome in enumerate(x_population):
-
-            Input = np.array(genome, copy=False)
-            Input = Input.ravel()
 
-
-
+    if isinstance(activation_potentiations, str):
+        activation_potentiations = [activation_potentiations]
+    else:
+        activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-
+    x_population = apply_activation(x_population, activation_potentiations)
+    result = x_population @ weights.T
 
-    return
+    return result
 
 
 def cross_over(first_parent_W,
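The rewritten `planeat.evaluate` is now a pure batched forward pass: it first coerces `activation_potentiations` into a uniform shape (a bare string becomes a one-element list, and each non-list entry gets wrapped), then calls `apply_activation` once on the whole population before the matrix product. The coercion step in isolation, extracted into a hypothetical helper purely for illustration:

```python
# Mirrors the normalization logic added to planeat's evaluate;
# normalize_spec is a made-up name, not part of the package API.
def normalize_spec(activation_potentiations):
    if isinstance(activation_potentiations, str):
        return [activation_potentiations]
    return [item if isinstance(item, list) else [item] for item in activation_potentiations]

print(normalize_spec('relu'))                        # ['relu']
print(normalize_spec(['relu', ['tanh', 'linear']]))  # [['relu'], ['tanh', 'linear']]
```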
|