pyerualjetwork 4.3.2.1__py3-none-any.whl → 4.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {pyerualjetwork-afterburner → pyerualjetwork}/__init__.py +1 -1
  2. pyerualjetwork/activation_functions.py +343 -0
  3. pyerualjetwork/activation_functions_cuda.py +340 -0
  4. pyerualjetwork/model_operations.py +408 -0
  5. pyerualjetwork/model_operations_cuda.py +421 -0
  6. pyerualjetwork/plan.py +627 -0
  7. pyerualjetwork/plan_cuda.py +651 -0
  8. pyerualjetwork/planeat.py +825 -0
  9. pyerualjetwork/planeat_cuda.py +834 -0
  10. {pyerualjetwork-4.3.2.1.dist-info → pyerualjetwork-4.3.3.dist-info}/METADATA +17 -4
  11. pyerualjetwork-4.3.3.dist-info/RECORD +44 -0
  12. pyerualjetwork-4.3.3.dist-info/top_level.txt +2 -0
  13. pyerualjetwork_afterburner/__init__.py +11 -0
  14. pyerualjetwork_afterburner/data_operations.py +406 -0
  15. pyerualjetwork_afterburner/data_operations_cuda.py +461 -0
  16. pyerualjetwork_afterburner/help.py +17 -0
  17. pyerualjetwork_afterburner/loss_functions.py +21 -0
  18. pyerualjetwork_afterburner/loss_functions_cuda.py +21 -0
  19. pyerualjetwork_afterburner/memory_operations.py +298 -0
  20. pyerualjetwork_afterburner/metrics.py +190 -0
  21. pyerualjetwork_afterburner/metrics_cuda.py +163 -0
  22. pyerualjetwork_afterburner/ui.py +22 -0
  23. pyerualjetwork_afterburner/visualizations.py +823 -0
  24. pyerualjetwork_afterburner/visualizations_cuda.py +825 -0
  25. pyerualjetwork-4.3.2.1.dist-info/RECORD +0 -24
  26. pyerualjetwork-4.3.2.1.dist-info/top_level.txt +0 -1
  27. {pyerualjetwork-afterburner → pyerualjetwork}/data_operations.py +0 -0
  28. {pyerualjetwork-afterburner → pyerualjetwork}/data_operations_cuda.py +0 -0
  29. {pyerualjetwork-afterburner → pyerualjetwork}/help.py +0 -0
  30. {pyerualjetwork-afterburner → pyerualjetwork}/loss_functions.py +0 -0
  31. {pyerualjetwork-afterburner → pyerualjetwork}/loss_functions_cuda.py +0 -0
  32. {pyerualjetwork-afterburner → pyerualjetwork}/memory_operations.py +0 -0
  33. {pyerualjetwork-afterburner → pyerualjetwork}/metrics.py +0 -0
  34. {pyerualjetwork-afterburner → pyerualjetwork}/metrics_cuda.py +0 -0
  35. {pyerualjetwork-afterburner → pyerualjetwork}/ui.py +0 -0
  36. {pyerualjetwork-afterburner → pyerualjetwork}/visualizations.py +0 -0
  37. {pyerualjetwork-afterburner → pyerualjetwork}/visualizations_cuda.py +0 -0
  38. {pyerualjetwork-4.3.2.1.dist-info → pyerualjetwork-4.3.3.dist-info}/WHEEL +0 -0
  39. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/activation_functions.py +0 -0
  40. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/activation_functions_cuda.py +0 -0
  41. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/model_operations.py +0 -0
  42. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/model_operations_cuda.py +0 -0
  43. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/plan.py +0 -0
  44. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/plan_cuda.py +0 -0
  45. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/planeat.py +0 -0
  46. {pyerualjetwork-afterburner → pyerualjetwork_afterburner}/planeat_cuda.py +0 -0
@@ -0,0 +1,651 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+
4
+ MAIN MODULE FOR PLAN_CUDA
5
+
6
+ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
7
+
8
+ PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
9
+ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
10
+
11
+ @author: Hasan Can Beydili
12
+ @YouTube: https://www.youtube.com/@HasanCanBeydili
13
+ @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
14
+ @Instagram: https://www.instagram.com/canbeydilj/
15
+ @contact: tchasancan@gmail.com
16
+ """
17
+
18
+ import cupy as cp
19
+ import math
20
+
21
+ ### LIBRARY IMPORTS ###
22
+ from .ui import loading_bars, initialize_loading_bar
23
+ from .data_operations_cuda import normalization, decode_one_hot, batcher
24
+ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
25
+ from .activation_functions_cuda import apply_activation, Softmax, all_activations
26
+ from .metrics_cuda import metrics
27
+ from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
28
+ from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
29
+ from .visualizations_cuda import (
30
+ draw_neural_web,
31
+ update_neural_web_for_fit,
32
+ plot_evaluate,
33
+ update_neuron_history,
34
+ initialize_visualization_for_fit,
35
+ update_weight_visualization_for_fit,
36
+ update_decision_boundary_for_fit,
37
+ update_validation_history_for_fit,
38
+ display_visualization_for_fit,
39
+ display_visualizations_for_learner,
40
+ update_history_plots_for_learner,
41
+ initialize_visualization_for_learner,
42
+ update_neuron_history_for_learner,
43
+ show
44
+ )
45
+
46
+ ### GLOBAL VARIABLES ###
47
+ bar_format_normal = loading_bars()[0]
48
+ bar_format_learner = loading_bars()[1]
49
+
50
+ # BUILD -----
51
+
52
def fit(
    x_train,
    y_train,
    val=False,
    val_count=None,
    activation_potentiation=None,
    x_val=None,
    y_val=None,
    show_training=None,
    interval=100,
    LTD=0,
    decision_boundary_status=True,
    train_bar=True,
    auto_normalization=True,
    neurons_history=False,
    dtype=cp.float32,
    memory='gpu'
):
    """
    Creates a PLAN model fitted to the given data.

    Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes

    Args:
        x_train (list | array-like): Input samples.
        y_train (list | array-like): One-hot encoded target labels.
        val (bool): Run validation accuracy checks during training. Default: False.
        val_count (int | None): Controls how often validation runs; falls back to 10 when falsy.
        activation_potentiation (list | None): Activation function names applied to each
            input before the weight update. Default: ['linear']. (Uses a None sentinel to
            avoid a shared mutable default argument.)
        x_val (list | array-like | None): Validation inputs; defaults to x_train when val is True.
        y_val (list | array-like | None): Validation labels; defaults to y_train when val is True.
        show_training (bool | None): Enable the live training report visualization.
        interval (float | int): Frame delay (ms) for the training report animation
            (33.33 = 30 FPS, 16.67 = 60 FPS).
        LTD (int): Long Term Depression hyperparameter — number of random depression
            passes applied inside feed_forward during training. Default: 0.
        decision_boundary_status (bool): Also draw decision boundaries when visualizing. Default: True.
        train_bar (bool): Show a training progress bar. Default: True.
        auto_normalization (bool): Normalize STPW before accumulating it into LTPW. Default: True.
        neurons_history (bool): Record the history of neuron changes during training. Default: False.
        dtype (cupy.dtype): Array dtype, cp.float32 by default (cp.float64 / cp.float16 possible).
        memory (str): 'gpu' keeps the dataset in GPU RAM, 'cpu' keeps it in host RAM.

    Returns:
        cupy.ndarray: Normalized weight matrix (LTPW).

    Raises:
        ValueError: If x_train/y_train lengths differ or `memory` is not 'cpu'/'gpu'.
    """
    # BUGFIX (idiom): mutable default argument replaced by a None sentinel;
    # behavior is unchanged for callers relying on the ['linear'] default.
    if activation_potentiation is None:
        activation_potentiation = ['linear']

    # Pre-checks
    if train_bar and val:
        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
    elif train_bar and val == False:
        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)

    if len(x_train) != len(y_train):
        raise ValueError("x_train and y_train must have the same length.")

    if val and (x_val is None or y_val is None):
        x_val, y_val = x_train, y_train

    if memory == 'gpu':
        x_train = transfer_to_gpu(x_train, dtype=dtype)
        y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

        if val:
            x_val = transfer_to_gpu(x_val, dtype=dtype)
            y_val = transfer_to_gpu(y_val, dtype=y_train.dtype)

    elif memory == 'cpu':
        x_train = transfer_to_cpu(x_train, dtype=dtype)
        y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

        if val:
            x_val = transfer_to_cpu(x_val, dtype=dtype)
            y_val = transfer_to_cpu(y_val, dtype=y_train.dtype)

    else:
        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

    val_list = [] if val else None
    val_count = val_count or 10
    # BUGFIX: val_acc was unbound until the first validation step, so
    # neurons_history=True raised NameError on early iterations (and whenever
    # val=False). Start at 0 so update_neuron_history always gets a number.
    val_acc = 0

    # Defining weights
    STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)   # STPW = SHORT TERM POTENTIATION WEIGHT
    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)  # LTPW = LONG TERM POTENTIATION WEIGHT

    # Initialize visualization
    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)

    # Training process
    for index, inp in enumerate(x_train):
        inp = transfer_to_gpu(inp, dtype=dtype).ravel()
        y_decoded = decode_one_hot(cp.array(y_train[index], copy=False, dtype=y_train.dtype))

        # Weight updates: write this sample into STPW, then accumulate into LTPW.
        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW

        if val and index != 0:
            # Evaluate the accumulated weights on the validation set at the
            # configured cadence.
            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW, memory=memory)[get_acc()]
                val_list.append(val_acc)

        # Visualization updates
        if show_training:
            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
            if decision_boundary_status:
                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
        if neurons_history:
            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
        if train_bar:
            train_progress.update(1)

        # Reset the short-term weights for the next sample.
        STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)

    if show_training:
        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
        show()

    if neurons_history:
        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
        show()

    return normalization(LTPW, dtype=dtype)
189
+
190
+
191
def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
            neural_web_history=False, show_current_activations=False, auto_normalization=True,
            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
            interval=33.33, target_acc=None, target_loss=None,
            start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
    """
    Optimizes the activation functions for a PLAN network via genetic search
    (PLANEAT), using the train data to find the most accurate combination of
    activation potentiation for the given dataset.

    In PLAN the learnable parameters are the activation functions, which are
    not differentiable — hence genetic optimization instead of backpropagation.

    Args:
        x_train (array-like): Training inputs.
        y_train (array-like): Training labels (one-hot; optimized via optimize_labels).
        optimizer (callable): PLANEAT evolver wrapper, e.g.
            ``lambda *a, **kw: planeat_cuda.evolver(*a, activation_add_prob=0.85, **kw)``.
        fit_start (bool): MANDATORY. True trains the first-generation population with
            fit() per activation; False starts from random genomes (define_genomes).
        strategy (str): 'accuracy' | 'f1' | 'precision' | 'recall' — metric maximized
            when selecting the best genome. Default 'accuracy'.
        gen (int): Number of generations.
        batch_size (float): Fraction of the train set sampled per evaluation
            (1 = full set). Default 1.
        neural_web_history (bool): Draw neural-web history. Default False.
        show_current_activations (bool): Print the currently selected activations
            whenever the best genome improves. Default False.
        auto_normalization (bool): Passed through to fit(); only meaningful when
            fit_start is True. Default True.
        neurons_history (bool): Record neuron-change history. Default False.
        early_stop (bool): Stop when best accuracy does not improve between two
            consecutive generations. Default False.
        loss (str): 'categorical_crossentropy' or 'binary_crossentropy' — monitoring
            only; PLAN needs no loss for training.
        show_history (bool): Plot training history during optimization. Default False.
        interval (float): Visualization frame delay in ms. Default 33.33.
        target_acc (float | None): Stop early once best accuracy reaches this value.
        target_loss (float | None): Stop early once best loss reaches this value.
        start_this_act (list | None): Activations of a previously interrupted run to
            resume from (seeded into genome 0 when fit_start is False).
        start_this_W (cupy.ndarray | None): Weights of that genome.
        dtype (cupy.dtype): Array dtype, cp.float32 by default.
        memory (str): 'gpu' or 'cpu' dataset residency.

    Returns:
        tuple: (best_weights, best_model predictions, best_acc, final_activations) —
        readable via the model_operations module accessors.

    Raises:
        ValueError: On invalid strategy, target_acc out of [0, 1], non-bool fit_start,
        or invalid memory value.
    """

    from .planeat_cuda import define_genomes

    data = 'Train'  # label used in progress-bar postfix keys and prints

    # Population size equals the number of available activation functions.
    activation_potentiation = all_activations()
    activation_potentiation_len = len(activation_potentiation)

    y_train = optimize_labels(y_train, cuda=True)

    # Select dataset residency and the matching batcher implementation.
    if memory == 'gpu':
        x_train = transfer_to_gpu(x_train, dtype=dtype)
        y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

        from .data_operations_cuda import batcher

    elif memory == 'cpu':
        x_train = transfer_to_cpu(x_train, dtype=dtype)
        y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

        from .data_operations import batcher

    else:
        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
    if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
    if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

    # Initialize progress bar (wider when batch metrics are shown)
    if batch_size == 1:
        ncols = 76
    else:
        ncols = 89

    # Initialize variables
    best_acc = 0
    best_f1 = 0
    best_recall = 0
    best_precision = 0
    best_acc_per_gen_list = []
    postfix_dict = {}
    loss_list = []
    target_pop = []  # fitness values handed to the optimizer each generation

    progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)

    if fit_start is False:
        # Random initial population; optionally seed genome 0 from a resumed run.
        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)

        if start_this_act is not None and start_this_W is not None:
            weight_pop[0] = start_this_W
            act_pop[0] = start_this_act

    else:
        # Population is built lazily during generation 0 via fit() below.
        weight_pop = []
        act_pop = []

    for i in range(gen):
        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
        progress.set_postfix(postfix_dict)

        # Reset the bar for this generation.
        progress.n = 0
        progress.last_print_n = 0
        progress.update(0)

        for j in range(activation_potentiation_len):

            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)

            if fit_start is True and i == 0:
                # Generation 0 with fit_start: train one genome per activation.
                act_pop.append(activation_potentiation[j])
                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
                weight_pop.append(W)

            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
            acc = model[get_acc()]

            # Collect this genome's fitness according to the chosen strategy.
            if strategy == 'accuracy': target_pop.append(acc)

            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
                precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])

                if strategy == 'precision':
                    target_pop.append(precision_score)

                    if i == 0 and j == 0:
                        best_precision = precision_score

                if strategy == 'recall':
                    target_pop.append(recall_score)

                    if i == 0 and j == 0:
                        best_recall = recall_score

                if strategy == 'f1':
                    target_pop.append(f1_score)

                    if i == 0 and j == 0:
                        best_f1 = f1_score

            # New best genome? (>= keeps the latest among ties)
            if ((strategy == 'accuracy' and acc >= best_acc) or
                (strategy == 'f1' and f1_score >= best_f1) or
                (strategy == 'precision' and precision_score >= best_precision) or
                (strategy == 'recall' and recall_score >= best_recall)):

                best_acc = acc
                best_weights = cp.copy(weight_pop[j])
                final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
                best_model = model

                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

                if batch_size == 1:
                    postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 3)
                else:
                    postfix_dict[f"{data} Batch Accuracy"] = cp.round(best_acc, 3)
                progress.set_postfix(postfix_dict)

                if show_current_activations:
                    print(f", Current Activations={final_activations}", end='')

                # Monitoring loss only — not used for optimization.
                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])

                if batch_size == 1:
                    postfix_dict[f"{data} Loss"] = cp.round(train_loss, 3)
                    best_loss = train_loss
                else:
                    postfix_dict[f"{data} Batch Loss"] = cp.round(train_loss, 3)
                progress.set_postfix(postfix_dict)
                best_loss = train_loss

                # Update visualizations during training
                if show_history:
                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
                    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)

                if neurons_history:
                    viz_objects['neurons']['artists'] = (
                        update_neuron_history_for_learner(cp.copy(best_weights), viz_objects['neurons']['ax'],
                                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                                                          y_train[0], viz_objects['neurons']['artists'],
                                                          data=data, fig1=viz_objects['neurons']['fig'],
                                                          acc=best_acc, loss=train_loss)
                    )

                if neural_web_history:
                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
                                                             G=viz_objects['web']['G'], return_objs=True)
                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
                    viz_objects['web']['artists'].append(art5_list)

            # Check target accuracy
            if target_acc is not None and best_acc >= target_acc:
                progress.close()
                # NOTE(review): this evaluate() call omits memory=memory, unlike the
                # early-stop and final-evaluation branches — confirm intended.
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations, dtype=dtype)

                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                          y_pred_batch=train_model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])

                print('\nActivations: ', final_activations)
                print(f'Train Accuracy:', train_model[get_acc()])
                print(f'Train Loss: ', train_loss, '\n')

                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                                   train_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations

            # Check target loss
            if target_loss is not None and best_loss <= target_loss:
                progress.close()
                # NOTE(review): memory=memory also omitted here — confirm intended.
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations, dtype=dtype)

                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                          y_pred_batch=train_model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])

                print('\nActivations: ', final_activations)
                print(f'Train Accuracy:', train_model[get_acc()])
                print(f'Train Loss: ', train_loss, '\n')

                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                                   train_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations

            progress.update(1)

        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)

        # Evolve the population from this generation's fitness values.
        weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
        target_pop = []

        # Early stopping check
        if early_stop == True and i > 0:
            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                progress.close()
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations, dtype=dtype, memory=memory)

                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                          y_pred_batch=train_model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])

                print('\nActivations: ', final_activations)
                print(f'Train Accuracy:', train_model[get_acc()])
                print(f'Train Loss: ', train_loss, '\n')

                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                                   train_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations

    # Final evaluation
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                           activation_potentiation=final_activations, dtype=dtype, memory=memory)

    if loss == 'categorical_crossentropy':
        train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
    else:
        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

    print('\nActivations: ', final_activations)
    print(f'Train Accuracy:', train_model[get_acc()])
    print(f'Train Loss: ', train_loss, '\n')

    # Display final visualizations
    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations
518
+
519
+
520
+
521
def feed_forward(
    Input,               # input data (vector)
    w,                   # weight matrix of the neural network
    is_training,         # True during training, False for inference
    activation_potentiation,  # list of activation function names
    Class='?',           # target class index when training
    LTD=0                # number of Long Term Depression passes
) -> tuple:
    """
    Applies feature extraction to the input using synaptic potentiation.

    Args:
        Input: Input data vector.
        w: Weight matrix of the neural network.
        is_training (bool): Training mode flag.
        activation_potentiation (list): Activation names applied to Input.
        Class (int): Target class (label) index; only used when training.
        LTD (int): Random-depression passes applied before the weight write.

    Returns:
        Training mode: the updated weight matrix (row `Class` overwritten in place).
        Inference mode: the neural layer output vector (w @ activated input).
    """
    activated = apply_activation(Input, activation_potentiation)

    if is_training == True:
        # Long Term Depression: subtract LTD random vectors from the
        # activated features before imprinting them.
        for _ in range(LTD):
            activated -= cp.random.rand(*activated.shape)

        # Imprint the (possibly depressed) features onto this class's row.
        w[Class, :] = activated
        return w

    # Inference: project the activated features through the weights.
    return cp.dot(w, activated)
565
+
566
+
567
def evaluate(
    x_test,
    y_test,
    W,
    activation_potentiation=None,
    loading_bar_status=True,
    show_metrics=False,
    dtype=cp.float32,
    memory='gpu'
) -> tuple:
    """
    Evaluates the neural network model using the given test data.

    Args:
        x_test (cp.ndarray): Test data.
        y_test (cp.ndarray): Test labels (one-hot encoded).
        W (cp.ndarray): Neural net weight matrix.
        activation_potentiation (list | None): Activation list. Default: ['linear']
            (None sentinel avoids a shared mutable default argument).
        loading_bar_status (bool): Show a loading bar. Default: True.
        show_metrics (bool): Visualize metrics. Default: False.
        dtype (cupy.dtype): Array dtype, cp.float32 by default.
        memory (str): 'gpu' or 'cpu' dataset residency.

    Returns:
        tuple: (W, predicted classes, final accuracy, None, None, softmax probabilities).

    Raises:
        ValueError: If `memory` is not 'cpu' or 'gpu'.
    """
    if activation_potentiation is None:
        activation_potentiation = ['linear']

    if memory == 'gpu':
        x_test = transfer_to_gpu(x_test, dtype=dtype)
        y_test = transfer_to_gpu(y_test, dtype=y_test.dtype)

    elif memory == 'cpu':
        x_test = transfer_to_cpu(x_test, dtype=dtype)
        y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)

    else:
        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

    predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=dtype)
    real_classes = cp.empty(len(x_test), dtype=y_test.dtype)
    predict_classes = cp.empty(len(x_test), dtype=y_test.dtype)

    true_predict = 0
    acc_list = cp.empty(len(x_test), dtype=dtype)

    if loading_bar_status:
        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)

    # PERF: the original copied W once per sample (cp.copy(W) inside the loop).
    # feed_forward's is_training=False path only reads `w` (cp.dot), so a single
    # defensive copy outside the loop is sufficient.
    W_frozen = cp.copy(W)

    for inpIndex in range(len(x_test)):
        Input = transfer_to_gpu(x_test[inpIndex], dtype=dtype).ravel()

        neural_layer = feed_forward(Input, W_frozen, is_training=False, Class='?', activation_potentiation=activation_potentiation)

        predict_probabilitys[inpIndex] = Softmax(neural_layer)

        RealOutput = decode_one_hot(transfer_to_gpu(y_test[inpIndex], dtype=y_test[inpIndex].dtype))
        real_classes[inpIndex] = RealOutput
        PredictedOutput = cp.argmax(neural_layer)
        predict_classes[inpIndex] = PredictedOutput

        if RealOutput == PredictedOutput:
            true_predict += 1

        # Running accuracy over the samples evaluated so far.
        acc = true_predict / (inpIndex + 1)
        acc_list[inpIndex] = acc

        if loading_bar_status:
            loading_bar.update(1)
            loading_bar.set_postfix({"Test Accuracy": acc})

    if loading_bar_status:
        loading_bar.close()

    if show_metrics:
        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=cp.copy(W), activation_potentiation=activation_potentiation)

    return W, predict_classes, acc_list[-1], None, None, predict_probabilitys