pyerualjetwork 4.3.0.1__py3-none-any.whl → 4.3.1__py3-none-any.whl
This diff shows the content of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- {pyerualjetwork-jetstorm → pyerualjetwork}/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +343 -0
- pyerualjetwork/activation_functions_cuda.py +340 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/model_operations.py +14 -14
- {pyerualjetwork-jetstorm → pyerualjetwork}/model_operations_cuda.py +17 -16
- {pyerualjetwork-jetstorm → pyerualjetwork}/plan.py +246 -44
- {pyerualjetwork-jetstorm → pyerualjetwork}/plan_cuda.py +256 -37
- {pyerualjetwork-jetstorm → pyerualjetwork}/planeat.py +43 -11
- {pyerualjetwork-jetstorm → pyerualjetwork}/planeat_cuda.py +44 -8
- {pyerualjetwork-4.3.0.1.dist-info → pyerualjetwork-4.3.1.dist-info}/METADATA +2 -3
- pyerualjetwork-4.3.1.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.1.dist-info/top_level.txt +1 -0
- pyerualjetwork-4.3.0.1.dist-info/RECORD +0 -24
- pyerualjetwork-4.3.0.1.dist-info/top_level.txt +0 -1
- pyerualjetwork-jetstorm/activation_functions.py +0 -291
- pyerualjetwork-jetstorm/activation_functions_cuda.py +0 -290
- {pyerualjetwork-jetstorm → pyerualjetwork}/data_operations.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/data_operations_cuda.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/help.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/loss_functions.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/loss_functions_cuda.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/memory_operations.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/metrics.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/metrics_cuda.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/ui.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/visualizations.py +0 -0
- {pyerualjetwork-jetstorm → pyerualjetwork}/visualizations_cuda.py +0 -0
- {pyerualjetwork-4.3.0.1.dist-info → pyerualjetwork-4.3.1.dist-info}/WHEEL +0 -0
pyerualjetwork/plan_cuda.py

@@ -16,21 +16,31 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
+import math
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations_cuda import normalization
+from .data_operations_cuda import normalization, decode_one_hot, batcher
 from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
-from .activation_functions_cuda import apply_activation, all_activations
+from .activation_functions_cuda import apply_activation, Softmax, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
 from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
 from .visualizations_cuda import (
     draw_neural_web,
+    update_neural_web_for_fit,
+    plot_evaluate,
+    update_neuron_history,
+    initialize_visualization_for_fit,
+    update_weight_visualization_for_fit,
+    update_decision_boundary_for_fit,
+    update_validation_history_for_fit,
+    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner
+    update_neuron_history_for_learner,
+    show
 )
 
 ### GLOBAL VARIABLES ###
@@ -42,42 +52,144 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
+    val=False,
+    val_count=None,
     activation_potentiation=['linear'],
-
-
+    x_val=None,
+    y_val=None,
+    show_training=None,
+    interval=100,
+    LTD=0,
+    decision_boundary_status=True,
+    train_bar=True,
+    auto_normalization=True,
+    neurons_history=False,
+    dtype=cp.float32,
+    memory='gpu'
 ):
     """
-    Creates a model to fitting data
+    Creates a model to fitting data.
 
     fit Args:
 
-        x_train (
+        x_train (list[num]): List or numarray of input data.
 
-        y_train (
+        y_train (list[num]): List or numarray of target labels. (one hot encoded)
+
+        val (None or True): validation in training process ? None or True default: None (optional)
+
+        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
 
         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-
-
-
+        x_val (list[num]): List of validation data. default: x_train (optional)
+
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
+
+        show_training (bool, str): True or None default: None (optional)
+
+        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
+
+        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
+
+        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
+
+        train_bar (bool): Training loading bar? True or False. Default is True. (optional)
+
+        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
+
+        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
+
+        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
+        memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
 
     Returns:
-
+        numpyarray([num]): (Weight matrix).
     """
-    # Pre-
+    # Pre-checks
 
-    if
+    if train_bar and val:
+        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
+    elif train_bar and val == False:
+        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
 
-
+    if len(x_train) != len(y_train):
+        raise ValueError("x_train and y_train must have the same length.")
+
+    if val and (x_val is None or y_val is None):
+        x_val, y_val = x_train, y_train
+
+    if memory == 'gpu':
+        x_train = transfer_to_gpu(x_train, dtype=dtype)
+        y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
 
-
-
+        if val:
+            x_val = transfer_to_gpu(x_val, dtype=dtype)
+            y_val = transfer_to_gpu(y_val, dtype=y_train.dtype)
+
+    elif memory == 'cpu':
+        x_train = transfer_to_cpu(x_train, dtype=dtype)
+        y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
+
+        if val:
+            x_val = transfer_to_cpu(x_val, dtype=dtype)
+            y_val = transfer_to_cpu(y_val, dtype=y_train.dtype)
+
+    else:
+        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
+
+    val_list = [] if val else None
+    val_count = val_count or 10
+    # Defining weights
+    STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TERM POTENTIATION WEIGHT
+    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TERM POTENTIATION WEIGHT
+    # Initialize visualization
+    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
+
+    # Training process
+    for index, inp in enumerate(x_train):
+        inp = transfer_to_gpu(inp, dtype=dtype).ravel()
+        y_decoded = decode_one_hot(cp.array(y_train[index], copy=False, dtype=y_train.dtype))
+        # Weight updates
+        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
+        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
+
+        if val and index != 0:
+            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
+                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW, memory=memory)[get_acc()]
+                val_list.append(val_acc)
+
+        # Visualization updates
+        if show_training:
+            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
+            if decision_boundary_status:
+                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
+            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
+        if neurons_history:
+            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
+        if train_bar:
+            train_progress.update(1)
+
+        STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
+
+    if show_training:
+        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
+        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
+        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
+        show()
+
+    if neurons_history:
+        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
+        show()
 
     return normalization(LTPW, dtype=dtype)
 
 
 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False,
+            neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
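The reworked `fit()` above grows from a bare (x, y, activations) call into the full training surface: optional in-training validation, visualization hooks, LTD, dtype, and memory placement. A minimal usage sketch, assuming only the 4.3.1 signature shown in this hunk; the data shapes are synthetic, not from the package:

```python
import cupy as cp
from pyerualjetwork import plan_cuda

# Toy one-hot classification data (shapes are illustrative).
x_train = cp.random.rand(100, 4, dtype=cp.float32)
y_train = cp.eye(2, dtype=cp.float32)[cp.random.randint(0, 2, 100)]

# val=True with x_val/y_val omitted falls back to the training set;
# val_count defaults to 10 via `val_count = val_count or 10`.
W = plan_cuda.fit(x_train, y_train, val=True, val_count=10,
                  auto_normalization=True, dtype=cp.float32, memory='gpu')
```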
@@ -122,6 +234,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
         early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
 
+        auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
@@ -225,10 +339,10 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
             if fit_start is True and i == 0:
                 act_pop.append(activation_potentiation[j])
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
                 weight_pop.append(W)
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j])
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
             acc = model[get_acc()]
 
             if strategy == 'accuracy': target_pop.append(acc)
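`learner()` now forwards `train_bar=False` and the new `auto_normalization` flag into its seeding `fit()` call, and gives `evaluate()` the loading-bar, activation, dtype, and memory settings explicitly. A hedged call-site sketch; wrapping `planeat_cuda.evolver` in a lambda follows the library's usual optimizer pattern but is an assumption here, as are the data shapes:

```python
import cupy as cp
from pyerualjetwork import plan_cuda, planeat_cuda

x_train = cp.random.rand(200, 8, dtype=cp.float32)
y_train = cp.eye(3, dtype=cp.float32)[cp.random.randint(0, 3, 200)]

# Assumed wiring: learner() drives a genetic optimizer each generation.
genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args, **kwargs)

weights, preds, acc, activations = plan_cuda.learner(
    x_train, y_train,
    optimizer=genetic_optimizer,
    fit_start=True,           # seeds candidates through fit() (now with train_bar=False)
    auto_normalization=True,  # only consulted when fit_start is True, per the docstring
    gen=5,
    dtype=cp.float32, memory='gpu')
```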
@@ -262,7 +376,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 best_acc = acc
                 best_weights = cp.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
-
                 best_model = model
 
                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
@@ -314,7 +427,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                   activation_potentiation=final_activations)
+                                   activation_potentiation=final_activations, dtype=dtype)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -335,8 +448,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target loss
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
-                                   activation_potentiation=final_activations)
+            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                                   activation_potentiation=final_activations, dtype=dtype)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -346,7 +459,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy:
+            print(f'Train Accuracy:', train_model[get_acc()])
             print(f'Train Loss: ', train_loss, '\n')
 
             # Display final visualizations
@@ -366,8 +479,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
-                                       activation_potentiation=final_activations)
+                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                                       activation_potentiation=final_activations, dtype=dtype, memory=memory)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -387,8 +500,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Final evaluation
    progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights,
-                           activation_potentiation=final_activations)
+    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                           activation_potentiation=final_activations, dtype=dtype, memory=memory)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
@@ -396,18 +509,70 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy:
-    print(f'Train Loss
+    print(f'Train Accuracy:', train_model[get_acc()])
+    print(f'Train Loss: ', train_loss, '\n')
 
     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations
 
+
+
+def feed_forward(
+    Input,                    # list[num]: Input data.
+    w,                        # num: Weight matrix of the neural network.
+    is_training,              # bool: Flag indicating if the function is called during training (True or False).
+    activation_potentiation,
+    Class='?',                # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
+    LTD=0
+) -> tuple:
+    """
+    Applies feature extraction process to the input data using synaptic potentiation.
+
+    Args:
+        Input (num): Input data.
+        w (num): Weight matrix of the neural network.
+        is_training (bool): Flag indicating if the function is called during training (True or False).
+        Class (int): if is during training then which class(label) ? is isnt then put None.
+        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+
+    Returns:
+        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
+        or
+        num: neural network output
+    """
+
+    Output = apply_activation(Input, activation_potentiation)
+
+    Input = Output
+
+    if is_training == True:
+
+        for _ in range(LTD):
+
+            depression_vector = cp.random.rand(*Input.shape)
+
+            Input -= depression_vector
+
+        w[Class, :] = Input
+        return w
+
+    else:
+
+        neural_layer = cp.dot(w, Input)
+
+        return neural_layer
+
+
 def evaluate(
     x_test,
     y_test,
     W,
-    activation_potentiation=['linear']
+    activation_potentiation=['linear'],
+    loading_bar_status=True,
+    show_metrics=False,
+    dtype=cp.float32,
+    memory='gpu'
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.
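`feed_forward()` is new at module level and carries both halves of the PLAN scheme: in training mode it (optionally, after `LTD` random depression passes) writes the activated input into the weight row of its class and returns the updated matrix; in inference mode it returns `cp.dot(w, Input)`. A toy sketch of both modes, assuming `'linear'` acts as the identity activation:

```python
import cupy as cp
from pyerualjetwork import plan_cuda

classes, features = 3, 5
W = cp.ones((classes, features), dtype=cp.float32)  # STPW-style weights
x = cp.random.rand(features, dtype=cp.float32)

# Training mode: imprint x onto the weight row for class 1.
W = plan_cuda.feed_forward(x, W, is_training=True,
                           activation_potentiation=['linear'], Class=1, LTD=0)

# Inference mode: one score per class, i.e. cp.dot(W, x).
scores = plan_cuda.feed_forward(x, W, is_training=False,
                                activation_potentiation=['linear'])
```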
@@ -417,16 +582,70 @@ def evaluate(
 
         y_test (cp.ndarray): Test labels (one-hot encoded).
 
-        W (cp.ndarray): Neural net weight matrix.
+        W (list[cp.ndarray]): Neural net weight matrix.
 
         activation_potentiation (list): Activation list. Default = ['linear'].
 
+        loading_bar_status (bool): Loading bar (optional). Default = True.
+
+        show_metrics (bool): Visualize metrics ? (optional). Default = False.
+
+        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
+        memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
+
     Returns:
         tuple: Model (list).
     """
 
-
-
-
+    if memory == 'gpu':
+        x_test = transfer_to_gpu(x_test, dtype=dtype)
+        y_test = transfer_to_gpu(y_test, dtype=y_test.dtype)
+
+    elif memory == 'cpu':
+        x_test = transfer_to_cpu(x_test, dtype=dtype)
+        y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)
+
+    else:
+        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
+
+    predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=dtype)
+    real_classes = cp.empty(len(x_test), dtype=y_test.dtype)
+    predict_classes = cp.empty(len(x_test), dtype=y_test.dtype)
+
+    true_predict = 0
+    acc_list = cp.empty(len(x_test), dtype=dtype)
+
+    if loading_bar_status:
+        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
+
+    for inpIndex in range(len(x_test)):
+        Input = transfer_to_gpu(x_test[inpIndex], dtype=dtype).ravel()
+        neural_layer = Input
+
+        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+
+        predict_probabilitys[inpIndex] = Softmax(neural_layer)
+
+        RealOutput = decode_one_hot(transfer_to_gpu(y_test[inpIndex], dtype=y_test[inpIndex].dtype))
+        real_classes[inpIndex] = RealOutput
+        PredictedOutput = cp.argmax(neural_layer)
+        predict_classes[inpIndex] = PredictedOutput
+
+        if RealOutput == PredictedOutput:
+            true_predict += 1
+
+        acc = true_predict / (inpIndex + 1)
+        acc_list[inpIndex] = acc
+
+        if loading_bar_status:
+            loading_bar.update(1)
+            loading_bar.set_postfix({"Test Accuracy": acc})
+
+    if loading_bar_status:
+        loading_bar.close()
+
+    if show_metrics:
+        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=cp.copy(W), activation_potentiation=activation_potentiation)
 
-    return W,
+    return W, predict_classes, acc_list[-1], None, None, predict_probabilitys
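`evaluate()` now returns a six-element model tuple ending in the softmax probabilities, and the library's getters index into it (the diff itself uses `model[get_acc()]` and `train_model[get_preds_softmax()]`). A sketch that fits and evaluates on the same synthetic data, purely to show the unpacking:

```python
import cupy as cp
from pyerualjetwork import plan_cuda
from pyerualjetwork.model_operations_cuda import get_acc, get_preds, get_preds_softmax

x = cp.random.rand(50, 4, dtype=cp.float32)
y = cp.eye(2, dtype=cp.float32)[cp.random.randint(0, 2, 50)]

W = plan_cuda.fit(x, y, train_bar=False)

model = plan_cuda.evaluate(x, y, W=W, activation_potentiation=['linear'],
                           loading_bar_status=False, dtype=cp.float32, memory='gpu')

test_acc   = model[get_acc()]            # acc_list[-1] in the return tuple
test_preds = model[get_preds()]          # predict_classes
test_probs = model[get_preds_softmax()]  # predict_probabilitys
```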
pyerualjetwork/planeat.py

@@ -17,6 +17,7 @@ import random
 import math
 
 ### LIBRARY IMPORTS ###
+from .plan import feed_forward
 from .data_operations import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
@@ -296,7 +297,6 @@ def evolver(weights,
     mutated_W = np.copy(bad_weights)
     mutated_act = bad_activations.copy()
 
-
     for i in range(len(bad_weights)):
 
         if policy == 'aggressive':
@@ -399,7 +399,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations):
+def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=np.float32):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -412,30 +412,62 @@ def evaluate(x_population, weights, activation_potentiations):
         activation_potentiations (list or str): A list where each entry represents an activation function
                                                 or a potentiation strategy applied to each genome. If only one
                                                 activation function is used, this can be a single string.
+        rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
+                                  Default is False.
+
+        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
     Returns:
         list: A list of outputs corresponding to each genome in the population after applying the respective
               activation function and weights.
 
+    Notes:
+        - If `rl_mode` is True:
+            - Accepts x_population is a single genom
+            - The inputs are flattened, and the activation function is applied across the single genom.
+
+        - If `rl_mode` is False:
+            - Accepts x_population is a list of genomes
+            - Each genome is processed individually, and the results are stored in the `outputs` list.
+
+        - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
+
     Example:
         ```python
-        outputs = evaluate(x_population, weights, activation_potentiations)
+        outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
         ```
 
     - The function returns a list of outputs after processing the population, where each element corresponds to
       the output for each genome in `x_population`.
-    """
+    """
+
+    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
+
+    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
     ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-
-
-
+    if rl_mode == True:
+        Input = np.array(x_population, copy=False, dtype=dtype)
+        Input = Input.ravel()
+
+        if isinstance(activation_potentiations, str):
+            activation_potentiations = [activation_potentiations]
+
+        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
+
     else:
-
+        outputs = [0] * len(x_population)
+        for i, genome in enumerate(x_population):
+
+            Input = np.array(genome, copy=False)
+            Input = Input.ravel()
+
+            if isinstance(activation_potentiations[i], str):
+                activation_potentiations[i] = [activation_potentiations[i]]
 
-
-        result = x_population @ weights.T
+            outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
 
-        return
+    return outputs
 
 
 def cross_over(first_parent_W,
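The rewritten `planeat.evaluate()` drops the old batched `x_population @ weights.T` in favor of per-genome `feed_forward()` calls, plus a single-genome `rl_mode` fast path. Both call modes, sketched with placeholder genome weights (the 2x4 shape is hypothetical):

```python
import numpy as np
from pyerualjetwork import planeat

genome_W = np.random.rand(2, 4).astype(np.float32)  # placeholder weight matrix

# rl_mode=True: one genome; the observation is flattened and fed forward once.
state = np.random.rand(4).astype(np.float32)
action_scores = planeat.evaluate(state, weights=genome_W,
                                 activation_potentiations='linear', rl_mode=True)

# rl_mode=False (default): parallel lists, one output per genome.
population = [np.random.rand(4).astype(np.float32) for _ in range(10)]
weights    = [genome_W.copy() for _ in range(10)]
outputs    = planeat.evaluate(population, weights, ['linear'] * 10)
```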
pyerualjetwork/planeat_cuda.py

@@ -19,6 +19,7 @@ import math
 
 
 ### LIBRARY IMPORTS ###
+from .plan_cuda import feed_forward
 from .data_operations_cuda import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
@@ -398,7 +399,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations):
+def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=cp.float32):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -413,29 +414,64 @@ def evaluate(x_population, weights, activation_potentiations):
         activation_potentiations (list or str): A list where each entry represents an activation function
                                                 or a potentiation strategy applied to each genome. If only one
                                                 activation function is used, this can be a single string.
+
+        rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
+                                  Default is False.
+
+
+        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
     Returns:
         list: A list of outputs corresponding to each genome in the population after applying the respective
               activation function and weights.
 
+    Notes:
+        - If `rl_mode` is True:
+            - Accepts x_population is a single genom
+            - The inputs are flattened, and the activation function is applied across the single genom.
+
+        - If `rl_mode` is False:
+            - Accepts x_population is a list of genomes
+            - Each genome is processed individually, and the results are stored in the `outputs` list.
+
+        - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
+
     Example:
         ```python
-        outputs = evaluate(x_population, weights, activation_potentiations)
+        outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
         ```
 
     - The function returns a list of outputs after processing the population, where each element corresponds to
       the output for each genome in `x_population`.
     """
+
+    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
+
+    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
     ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-    if
-
+    if rl_mode == True:
+        Input = cp.array(x_population, dtype=dtype, copy=False)
+        Input = Input.ravel()
+
+        if isinstance(activation_potentiations, str):
+            activation_potentiations = [activation_potentiations]
+
+        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
+
     else:
-
+        outputs = [0] * len(x_population)
+        for i, genome in enumerate(x_population):
+
+            Input = cp.array(genome)
+            Input = Input.ravel()
+
+            if isinstance(activation_potentiations[i], str):
+                activation_potentiations[i] = [activation_potentiations[i]]
 
-
-        result = x_population @ weights.T
+            outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
 
-        return
+    return outputs
 
 
 def cross_over(first_parent_W,
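The CUDA twin in `planeat_cuda` receives the identical `rl_mode`/`dtype` treatment; the only call-site difference is CuPy arrays (again with a placeholder weight matrix):

```python
import cupy as cp
from pyerualjetwork import planeat_cuda

genome_W = cp.random.rand(2, 4, dtype=cp.float32)  # placeholder weight matrix
state = cp.random.rand(4, dtype=cp.float32)

action_scores = planeat_cuda.evaluate(state, weights=genome_W,
                                      activation_potentiations='linear',
                                      rl_mode=True, dtype=cp.float32)
```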
pyerualjetwork-4.3.1.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.3.0.1
+Version: 4.3.1
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -23,8 +23,7 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
 GitHub Page: https://github.com/HCB06/PyerualJetwork
 
 
-pip install pyerualjetwork
-pip install pyerualjetwork==x.x.x-jetstorm
+pip install pyerualjetwork==x.x.x (normal pyerualjetwork package)
 
 from pyerualjetwork import plan
 from pyerualjetwork import planeat
pyerualjetwork-4.3.1.dist-info/RECORD

@@ -0,0 +1,24 @@
+pyerualjetwork/__init__.py,sha256=DC4NJW7QwXpVUqokEUxO3GrR_c8bV0zLmui9p-HcEfQ,639
+pyerualjetwork/activation_functions.py,sha256=eLEesmMgDvkI1TqaLTpqtOgTaLbHEAyw-D57KIKd9G4,11775
+pyerualjetwork/activation_functions_cuda.py,sha256=ztIw6rMR4t1289_TPIGYwE6qarl_YbSOGj5Ep3rUMqs,11803
+pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
+pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
+pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
+pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
+pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
+pyerualjetwork/model_operations.py,sha256=RKqnh7-MByFosxqme4q4jC1lOndX26O-OVXYV6ZxoEE,12965
+pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
+pyerualjetwork/plan.py,sha256=ApMQC46_I8qtMqO4lLYLme--SGcMRg-GRo1-gSb3A3I,31894
+pyerualjetwork/plan_cuda.py,sha256=H_EuNNyxrY6-AiuRkOYC8J_UmbzoqJ9aeO0i9pgUDZI,33277
+pyerualjetwork/planeat.py,sha256=e-J-u5gJYijKznN6gn2DZoaCJJro84DOBYTy1rR5-y4,39470
+pyerualjetwork/planeat_cuda.py,sha256=QNHCQLkR0MNFqyN2iHAtC7cbf8qZiD3p_54YH3lnMFA,39529
+pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
+pyerualjetwork/visualizations.py,sha256=1SKMZaJ80OD2qHUyMxW1IOv8zwmxzMPxclfbeq1Xr4g,28772
+pyerualjetwork/visualizations_cuda.py,sha256=KbMhfsLlxujy_i3QrwCf734Q-k6d7Zn_7CEbm3gzK9w,29186
+pyerualjetwork-4.3.1.dist-info/METADATA,sha256=DZGPEt8_FjnZGYzHWdLDSo5IKyQInv64aHmGsSmbEy0,7491
+pyerualjetwork-4.3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.3.1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.3.1.dist-info/RECORD,,
pyerualjetwork-4.3.1.dist-info/top_level.txt

@@ -0,0 +1 @@
+pyerualjetwork
|