pyerualjetwork 2.7.8__py3-none-any.whl → 4.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,696 @@
+ # -*- coding: utf-8 -*-
+ """
+ 
+ MAIN MODULE FOR PLAN_CUDA
+ 
+ PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ 
+ @author: Hasan Can Beydili
+ @YouTube: https://www.youtube.com/@HasanCanBeydili
+ @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+ @Instagram: https://www.instagram.com/canbeydilj/
+ @contact: tchasancan@gmail.com
+ """
+ 
+ import cupy as cp
+ import numpy as np
+ from colorama import Fore
+ import math
+ 
+ ### LIBRARY IMPORTS ###
+ from .ui import loading_bars, initialize_loading_bar
+ from .data_operations_cuda import normalization, decode_one_hot, batcher
+ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
+ from .activation_functions_cuda import apply_activation, Softmax, all_activations
+ from .metrics_cuda import metrics
+ from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
+ from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
+ from .visualizations_cuda import (
+     draw_neural_web,
+     update_neural_web_for_fit,
+     plot_evaluate,
+     update_neuron_history,
+     initialize_visualization_for_fit,
+     update_weight_visualization_for_fit,
+     update_decision_boundary_for_fit,
+     update_validation_history_for_fit,
+     display_visualization_for_fit,
+     display_visualizations_for_learner,
+     update_history_plots_for_learner,
+     initialize_visualization_for_learner,
+     update_neuron_history_for_learner,
+     show
+ )
+ 
+ ### GLOBAL VARIABLES ###
+ bar_format_normal = loading_bars()[0]
+ bar_format_learner = loading_bars()[1]
+ 
+ # BUILD -----
+ 
+ def fit(
+     x_train,
+     y_train,
+     val=False,
+     val_count=None,
+     activation_potentiation=['linear'],
+     x_val=None,
+     y_val=None,
+     show_training=None,
+     interval=100,
+     LTD=0,
+     decision_boundary_status=True,
+     train_bar=True,
+     auto_normalization=True,
+     neurons_history=False,
+     dtype=cp.float32,
+     memory='gpu'
+ ):
+ """
71
+ Creates a model to fitting data.
72
+
73
+ fit Args:
74
+
75
+ x_train (list[num]): List or numarray of input data.
76
+
77
+ y_train (list[num]): List or numarray of target labels. (one hot encoded)
78
+
79
+ val (None or True): validation in training process ? None or True default: None (optional)
80
+
81
+ val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
82
+
83
+ activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
84
+
85
+ x_val (list[num]): List of validation data. default: x_train (optional)
86
+
87
+ y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
88
+
89
+ show_training (bool, str): True or None default: None (optional)
90
+
91
+ LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
92
+
93
+ interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
94
+
95
+ decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
96
+
97
+ train_bar (bool): Training loading bar? True or False. Default is True. (optional)
98
+
99
+ auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
100
+
101
+ neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
102
+
103
+ dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
104
+
105
+ memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
106
+
107
+ Returns:
108
+ numpyarray([num]): (Weight matrix).
109
+ """
+     # Pre-checks
+ 
+     if train_bar and val:
+         train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
+     elif train_bar and not val:
+         train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
+ 
+     if len(x_train) != len(y_train):
+         raise ValueError("x_train and y_train must have the same length.")
+ 
+     if val and (x_val is None or y_val is None):
+         x_val, y_val = x_train, y_train
+ 
+     if memory == 'gpu':
+         x_train = transfer_to_gpu(x_train, dtype=dtype)
+         y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
+ 
+         if val:
+             x_val = transfer_to_gpu(x_val, dtype=dtype)
+             y_val = transfer_to_gpu(y_val, dtype=y_train.dtype)
+ 
+     elif memory == 'cpu':
+         x_train = transfer_to_cpu(x_train, dtype=dtype)
+         y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
+ 
+         if val:
+             x_val = transfer_to_cpu(x_val, dtype=dtype)
+             y_val = transfer_to_cpu(y_val, dtype=y_train.dtype)
+ 
+     else:
+         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
+ 
+     val_list = [] if val else None
+     val_count = val_count or 10
+     val_acc = 0  # defined up front so the neuron-history visualization can reference it before the first validation step
+ 
+     # Defining weights
+     STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)   # STPW = SHORT TERM POTENTIATION WEIGHT, shape: (classes, features)
+     LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)  # LTPW = LONG TERM POTENTIATION WEIGHT, shape: (classes, features)
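+     # How the accumulation works (illustrative sketch, not extra logic): for each
+     # sample, feed_forward writes the activated input into row `Class` of STPW,
+     # and the (optionally normalized) STPW is summed into LTPW. E.g. for a
+     # 2-class, 3-feature input [0.2, 0.5, 0.1] with label 1 and 'linear' activation:
+     #     STPW[1, :] = [0.2, 0.5, 0.1]   ->   LTPW[1, :] += normalization(STPW)[1, :]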
+     # Initialize visualization
+     vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
+ 
+     # Training process
+     for index, inp in enumerate(x_train):
+         inp = transfer_to_gpu(inp, dtype=dtype).ravel()
+         y_decoded = decode_one_hot(cp.array(y_train[index], copy=False, dtype=y_train.dtype))
+ 
+         # Weight updates
+         STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
+         LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
+ 
+         if val and index != 0:
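+             # Validation cadence (worked example): with val_count=10 and 500 training
+             # samples, math.ceil((10 / 500) * 100) = 2, so validation runs every 2nd sample.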
+             if index % math.ceil((val_count / len(x_train)) * 100) == 0:
+                 val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW, memory=memory)[get_acc()]
+                 val_list.append(val_acc)
+ 
+         # Visualization updates
+         if show_training:
+             update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
+             if decision_boundary_status:
+                 update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
+             update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+             update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
+         if neurons_history:
+             update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
+         if train_bar:
+             train_progress.update(1)
+ 
+         STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
+ 
+     if show_training:
+         ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+         ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
+         ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
+         ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
+         show()
+ 
+     if neurons_history:
+         ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
+         show()
+ 
+     return normalization(LTPW, dtype=dtype)
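+ 
+ # Illustrative usage sketch for fit() (not part of the module; the toy data
+ # below is hypothetical and assumes a CUDA-capable device):
+ #
+ #     import cupy as cp
+ #     from pyerualjetwork import plan_cuda
+ #
+ #     x = cp.random.rand(100, 4).astype(cp.float32)                  # 100 samples, 4 features
+ #     y = cp.eye(3, dtype=cp.float32)[cp.random.randint(0, 3, 100)]  # one-hot labels, 3 classes
+ #     W = plan_cuda.fit(x, y, activation_potentiation=['tanh'], train_bar=False)
+ #     print(W.shape)                                                 # (3, 4): (classes, features)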
+
190
+
191
+ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
192
+ neural_web_history=False, show_current_activations=False, auto_normalization=True,
193
+ neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
194
+ interval=33.33, target_acc=None, target_loss=None, except_this=None,
195
+ only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=cp.float32, memory='gpu'):
+     """
+     Optimizes the activation functions for a neural network by leveraging train data to find
+     the most accurate combination of activation potentiation for the given dataset.
+ 
+     Why genetic optimization and not backpropagation?
+     Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
+     Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+ 
+     Args:
+ 
+         x_train (array-like): Training input data.
+ 
+         y_train (array-like): Labels for training data.
+ 
+         optimizer (function): PLAN optimization technique with hyperparameters. (PLAN uses NEAT (PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your NEAT hyperparameters, for example: activation_add_prob=0.85', **kwargs) Example:
+         ```python
+         genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+                                                                         activation_add_prob=0.85,
+                                                                         mutations=False,
+                                                                         strategy='cross_over',
+                                                                         **kwargs)
+ 
+         model = plan_cuda.learner(x_train,
+                                   y_train,
+                                   optimizer=genetic_optimizer,
+                                   strategy='accuracy',
+                                   show_history=True,
+                                   target_acc=0.94,
+                                   interval=16.67)
+         ```
+ 
+         x_test (array-like, optional): Test input data (to improve next-generation generalization). If test data is not given, train-feedback learning is active.
+ 
+         y_test (array-like, optional): Test labels (to improve next-generation generalization). If test data is not given, train-feedback learning is active.
+ 
+         strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy' maximizes train (or test if given) accuracy during learning. 'f1' maximizes train (or test if given) f1 score. 'precision' maximizes train (or test if given) precision score. 'recall' maximizes train (or test if given) recall. Default is 'accuracy'.
+ 
+         gen (int, optional): The generation count for genetic optimization.
+ 
+         batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1 (100% of the test set).
+ 
+         auto_normalization (bool, optional): Setting this to False can give faster training times and much better accuracy on some datasets. Default is True.
+ 
+         early_stop (bool, optional): If True, implements early stopping during training. (Stops learning if test accuracy does not improve for two consecutive generations.) Default is False.
+ 
+         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False.
+ 
+         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
+ 
+         loss (str, optional): For visualization and monitoring only; PLAN neural networks don't need a loss function for training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+ 
+         interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 33.33.
+ 
+         target_acc (float, optional): The target accuracy to stop training early when achieved. Default is None.
+ 
+         target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
+ 
+         except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For available activation functions, run this code: plan.activations_list())
+ 
+         only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For available activation functions, run this code: plan.activations_list())
+ 
+         start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None.
+ 
+         start_this_W (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None.
+ 
+         neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
+ 
+         neural_web_history (bool, optional): Draws the history of the neural web. Default is False.
+ 
+         dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
+ 
+         memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
+ 
+     Returns:
+         tuple: A list of model parameters: [Weight matrix, Preds, Accuracy, [Activation functions]]. You can access these parameters via the model_operations module. For example: model_operations.get_weights() for the weight matrix.
+ 
+     """
+ 
+     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include the activation functions ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+ 
+     activation_potentiation = all_activations()
+ 
+     y_train = optimize_labels(y_train, cuda=True)
+ 
+     if x_test is None and y_test is None:
+         x_test = x_train
+         y_test = y_train
+         data = 'Train'
+     else:
+         data = 'Test'
+         y_test = optimize_labels(y_test, cuda=True)
+ 
+     if memory == 'gpu':
+         x_train = transfer_to_gpu(x_train, dtype=dtype)
+         y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
+ 
+         x_test = transfer_to_gpu(x_test, dtype=dtype)
+         y_test = transfer_to_gpu(y_test, dtype=y_train.dtype)
+ 
+         from .data_operations_cuda import batcher
+ 
+     elif memory == 'cpu':
+         x_train = transfer_to_cpu(x_train, dtype=dtype)
+         y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
+ 
+         x_test = transfer_to_cpu(x_test, dtype=dtype)
+         y_test = transfer_to_cpu(y_test, dtype=y_train.dtype)
+ 
+         from .data_operations import batcher
+ 
+     else:
+         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
+ 
+     if strategy not in ('accuracy', 'f1', 'recall', 'precision'):
+         raise ValueError("strategy parameter must be 'accuracy', 'f1', 'recall' or 'precision'.")
+ 
+     # Filter activation functions
+     if only_this is not None:
+         activation_potentiation = only_this
+     if except_this is not None:
+         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
+     if gen is None:
+         gen = len(activation_potentiation)
+ 
+     if start_this_act is None and len(activation_potentiation) % 2 != 0:
+         raise ValueError("Activation length must be an even number. Please use the 'except_this' parameter to exclude an activation. For example: except_this=['linear']")
+     if start_this_act is not None and (len(activation_potentiation) + 1) % 2 != 0:
+         raise ValueError("When using the start_this parameter, the activation length must still be an even number. Please use the 'except_this' parameter to exclude an activation. For example: except_this=['linear']")
+ 
+     # Initialize visualization components
+     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
+ 
+     # Initialize progress bar
+     if batch_size == 1:
+         ncols = 86
+     else:
+         ncols = 99
+ 
+     # Initialize variables
+     act_pop = []
+     weight_pop = []
+ 
+     if start_this_act is None and start_this_W is None:
+         best_acc = 0
+     else:
+         act_pop.append(start_this_act)
+         weight_pop.append(start_this_W)
+ 
+         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+         model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
+ 
+         if loss == 'categorical_crossentropy':
+             test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+         else:
+             test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+ 
+         best_acc = model[get_acc()]
+         best_loss = test_loss
+ 
+     best_acc_per_gen_list = []
+     postfix_dict = {}
+     loss_list = []
+     target_pop = []
+ 
+     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+ 
+     for i in range(gen):
+         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
+         progress.set_postfix(postfix_dict)
+ 
+         progress.n = 0
+         progress.last_print_n = 0
+         progress.update(0)
+ 
+         for j in range(len(activation_potentiation)):
+ 
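+             # batcher draws a fresh random partition each call; e.g. batch_size=0.08
+             # evaluates on a different ~8% slice of the test set every iteration
+             # (per the batch_size docstring above).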
+             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+ 
+             if i == 0:
+                 act_pop.append(activation_potentiation[j])
+                 W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                 weight_pop.append(W)
+ 
+             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
+ 
+             acc = model[get_acc()]
+             if strategy == 'accuracy': target_pop.append(acc)
+ 
+             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+                 precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+ 
+                 if strategy == 'precision':
+                     target_pop.append(precision_score)
+ 
+                     if i == 0 and j == 0:
+                         best_precision = precision_score
+ 
+                 if strategy == 'recall':
+                     target_pop.append(recall_score)
+ 
+                     if i == 0 and j == 0:
+                         best_recall = recall_score
+ 
+                 if strategy == 'f1':
+                     target_pop.append(f1_score)
+ 
+                     if i == 0 and j == 0:
+                         best_f1 = f1_score
+ 
+             if ((strategy == 'accuracy' and acc >= best_acc) or
+                 (strategy == 'f1' and f1_score >= best_f1) or
+                 (strategy == 'precision' and precision_score >= best_precision) or
+                 (strategy == 'recall' and recall_score >= best_recall)):
+ 
+                 best_acc = acc
+                 best_weights = weight_pop[j]
+                 final_activations = act_pop[j]
+                 best_model = model
+ 
+                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations  # collapse to a single entry if all activations are identical
+ 
+                 if batch_size == 1:
+                     postfix_dict[f"{data} Accuracy"] = best_acc
+                 else:
+                     postfix_dict[f"{data} Batch Accuracy"] = acc
+                 progress.set_postfix(postfix_dict)
+ 
+                 if show_current_activations:
+                     print(f", Current Activations={final_activations}", end='')
+ 
+                 if loss == 'categorical_crossentropy':
+                     test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+                 else:
+                     test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+ 
+                 if batch_size == 1:
+                     postfix_dict[f"{data} Loss"] = test_loss
+                     best_loss = test_loss
+                 else:
+                     postfix_dict[f"{data} Batch Loss"] = test_loss
+                     progress.set_postfix(postfix_dict)
+                     best_loss = test_loss
+ 
+                 # Update visualizations during training
+                 if show_history:
+                     gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                     update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                                                      best_acc_per_gen_list + [best_acc], x_train, final_activations)
+ 
+                 if neurons_history:
+                     viz_objects['neurons']['artists'] = (
+                         update_neuron_history_for_learner(cp.copy(best_weights), viz_objects['neurons']['ax'],
+                                                           viz_objects['neurons']['row'], viz_objects['neurons']['col'],
+                                                           y_train[0], viz_objects['neurons']['artists'],
+                                                           data=data, fig1=viz_objects['neurons']['fig'],
+                                                           acc=best_acc, loss=test_loss)
+                     )
+ 
+                 if neural_web_history:
+                     art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
+                                                              G=viz_objects['web']['G'], return_objs=True)
+                     art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+                     viz_objects['web']['artists'].append(art5_list)
+ 
+             # Check target accuracy
+             if target_acc is not None and best_acc >= target_acc:
+                 progress.close()
+                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                                        activation_potentiation=final_activations, dtype=dtype, memory=memory)
+ 
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+ 
+                 print('\nActivations: ', final_activations)
+                 print(f'Train Accuracy ({batch_size * 100}% of train samples):', train_model[get_acc()])
+                 print(f'Train Loss ({batch_size * 100}% of train samples): ', train_loss, '\n')
+                 if data == 'Test':
+                     print(f'Test Accuracy ({batch_size * 100}% of test samples): ', best_acc)
+                     print(f'Test Loss ({batch_size * 100}% of test samples): ', best_loss, '\n')
+ 
+                 # Display final visualizations
+                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+                                                    test_loss, y_train, interval)
+                 return best_weights, best_model[get_preds()], best_acc, final_activations
+ 
+             # Check target loss
+             if target_loss is not None and best_loss <= target_loss:
+                 progress.close()
+                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                                        activation_potentiation=final_activations, dtype=dtype, memory=memory)
+ 
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+ 
+                 print('\nActivations: ', final_activations)
+                 print(f'Train Accuracy ({batch_size * 100}% of train samples):', train_model[get_acc()])
+                 print(f'Train Loss ({batch_size * 100}% of train samples): ', train_loss, '\n')
+                 if data == 'Test':
+                     print(f'Test Accuracy ({batch_size * 100}% of test samples): ', best_acc)
+                     print(f'Test Loss ({batch_size * 100}% of test samples): ', best_loss, '\n')
+ 
+                 # Display final visualizations
+                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+                                                    test_loss, y_train, interval)
+                 return best_weights, best_model[get_preds()], best_acc, final_activations
+ 
+             progress.update(1)
+ 
+         best_acc_per_gen_list.append(best_acc)
+         loss_list.append(best_loss)
+ 
+         weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+         target_pop = []
+ 
+         # Early stopping check
+         if early_stop and i > 0:
+             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
+                 progress.close()
+                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                                        activation_potentiation=final_activations, dtype=dtype, memory=memory)
+ 
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+ 
+                 print('\nActivations: ', final_activations)
+                 print(f'Train Accuracy ({batch_size * 100}% of train samples):', train_model[get_acc()])
+                 print(f'Train Loss ({batch_size * 100}% of train samples): ', train_loss, '\n')
+                 if data == 'Test':
+                     print(f'Test Accuracy ({batch_size * 100}% of test samples): ', best_acc)
+                     print(f'Test Loss ({batch_size * 100}% of test samples): ', best_loss, '\n')
+ 
+                 # Display final visualizations
+                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+                                                    test_loss, y_train, interval)
+                 return best_weights, best_model[get_preds()], best_acc, final_activations
+ 
+     # Final evaluation
+     progress.close()
+     train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                            activation_potentiation=final_activations, dtype=dtype, memory=memory)
+ 
+     if loss == 'categorical_crossentropy':
+         train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+     else:
+         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+ 
+     print('\nActivations: ', final_activations)
+     print(f'Train Accuracy ({batch_size * 100}% of train samples):', train_model[get_acc()])
+     print(f'Train Loss ({batch_size * 100}% of train samples): ', train_loss, '\n')
+     if data == 'Test':
+         print(f'Test Accuracy ({batch_size * 100}% of test samples): ', best_acc)
+         print(f'Test Loss ({batch_size * 100}% of test samples): ', best_loss, '\n')
+ 
+     # Display final visualizations
+     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval)
+     return best_weights, best_model[get_preds()], best_acc, final_activations
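+ 
+ # Illustrative usage sketch for learner() (not part of the module; assumes
+ # planeat_cuda is installed alongside, as the docstring above recommends):
+ #
+ #     from pyerualjetwork import planeat_cuda
+ #
+ #     genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, **kwargs)
+ #     W, preds, acc, activations = learner(x_train, y_train, genetic_optimizer,
+ #                                          gen=5, batch_size=0.5, early_stop=True)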
+ 
+ 
+ def feed_forward(
+     Input,                    # list[num]: Input data.
+     w,                        # num: Weight matrix of the neural network.
+     is_training,              # bool: Flag indicating if the function is called during training (True or False).
+     activation_potentiation,  # list: Activation potentiation list for deep PLAN. (optional)
+     Class='?',                # int: Which class, if training.
+     LTD=0
+ ) -> tuple:
+ """
575
+ Applies feature extraction process to the input data using synaptic potentiation.
576
+
577
+ Args:
578
+ Input (num): Input data.
579
+ w (num): Weight matrix of the neural network.
580
+ is_training (bool): Flag indicating if the function is called during training (True or False).
581
+ Class (int): if is during training then which class(label) ? is isnt then put None.
582
+ # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
583
+
584
+ Returns:
585
+ tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
586
+ or
587
+ num: neural network output
588
+ """
589
+ 
+     Output = apply_activation(Input, activation_potentiation)
+ 
+     Input = Output
+ 
+     if is_training:
+ 
+         for _ in range(LTD):
+ 
+             depression_vector = cp.random.rand(*Input.shape)
+ 
+             Input -= depression_vector
+ 
+         w[Class, :] = Input
+         return w
+ 
+     else:
+ 
+         neural_layer = cp.dot(w, Input)
+ 
+         return neural_layer
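+ 
+ # Shape sketch (illustrative): with w of shape (classes, features) and a
+ # flattened input of shape (features,), training writes the activated input
+ # into w[Class, :], while inference returns cp.dot(w, Input) of shape
+ # (classes,), i.e. one potentiation score per class.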
+ 
+ 
+ def evaluate(
+     x_test,
+     y_test,
+     W,
+     activation_potentiation=['linear'],
+     loading_bar_status=True,
+     show_metrics=False,
+     dtype=cp.float32,
+     memory='gpu'
+ ) -> tuple:
+ """
623
+ Evaluates the neural network model using the given test data.
624
+
625
+ Args:
626
+ x_test (cp.ndarray): Test data.
627
+
628
+ y_test (cp.ndarray): Test labels (one-hot encoded).
629
+
630
+ W (list[cp.ndarray]): Neural net weight matrix.
631
+
632
+ activation_potentiation (list): Activation list. Default = ['linear'].
633
+
634
+ loading_bar_status (bool): Loading bar (optional). Default = True.
635
+
636
+ show_metrics (bool): Visualize metrics ? (optional). Default = False.
637
+
638
+ dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
639
+
640
+ memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
641
+
642
+ Returns:
643
+ tuple: Model (list).
644
+ """
645
+ 
+     if memory == 'gpu':
+         x_test = transfer_to_gpu(x_test, dtype=dtype)
+         y_test = transfer_to_gpu(y_test, dtype=y_test.dtype)
+ 
+     elif memory == 'cpu':
+         x_test = transfer_to_cpu(x_test, dtype=dtype)
+         y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)
+ 
+     else:
+         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
+ 
+     predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=dtype)
+     real_classes = cp.empty(len(x_test), dtype=y_test.dtype)
+     predict_classes = cp.empty(len(x_test), dtype=y_test.dtype)
+ 
+     true_predict = 0
+     acc_list = cp.empty(len(x_test), dtype=dtype)
+ 
+     if loading_bar_status:
+         loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
+ 
+     for inpIndex in range(len(x_test)):
+         Input = transfer_to_gpu(x_test[inpIndex], dtype=dtype).ravel()
+         neural_layer = Input
+ 
+         neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+ 
+         predict_probabilitys[inpIndex] = Softmax(neural_layer)
+ 
+         RealOutput = decode_one_hot(transfer_to_gpu(y_test[inpIndex], dtype=y_test[inpIndex].dtype))
+         real_classes[inpIndex] = RealOutput
+         PredictedOutput = cp.argmax(neural_layer)
+         predict_classes[inpIndex] = PredictedOutput
+ 
+         if RealOutput == PredictedOutput:
+             true_predict += 1
+ 
+         acc = true_predict / (inpIndex + 1)  # running accuracy over the samples seen so far
+         acc_list[inpIndex] = acc
+ 
+         if loading_bar_status:
+             loading_bar.update(1)
+             loading_bar.set_postfix({"Test Accuracy": acc})
+ 
+     if loading_bar_status:
+         loading_bar.close()
+ 
+     if show_metrics:
+         plot_evaluate(x_test, y_test, predict_classes, acc_list, W=cp.copy(W), activation_potentiation=activation_potentiation)
+ 
+     return W, predict_classes, acc_list[-1], None, None, predict_probabilitys  # layout consumed by the model_operations getters (get_acc, get_preds, get_preds_softmax)
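+ 
+ # Illustrative usage sketch for evaluate() (not part of the module; `W` and the
+ # activation list would come from a previous fit()/learner() run):
+ #
+ #     model = evaluate(x_test, y_test, W=W, activation_potentiation=['tanh'])
+ #     print('accuracy:', model[get_acc()])       # get_acc() indexes into the model tuple
+ #     print('predictions:', model[get_preds()])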