pyerualjetwork 2.7.8__py3-none-any.whl → 4.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/plan.py ADDED
@@ -0,0 +1,664 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+
4
+ MAIN MODULE FOR PLAN
5
+
6
+ PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
7
+ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
8
+
9
+ @author: Hasan Can Beydili
10
+ @YouTube: https://www.youtube.com/@HasanCanBeydili
11
+ @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
12
+ @Instagram: https://www.instagram.com/canbeydilj/
13
+ @contact: tchasancan@gmail.com
14
+ """
15
+
16
+ import numpy as np
17
+ from colorama import Fore
18
+ import math
19
+ import random
20
+
21
+ ### LIBRARY IMPORTS ###
22
+ from .ui import loading_bars, initialize_loading_bar
23
+ from .data_operations import normalization, decode_one_hot, batcher
24
+ from .loss_functions import binary_crossentropy, categorical_crossentropy
25
+ from .activation_functions import apply_activation, Softmax, all_activations
26
+ from .metrics import metrics
27
+ from .model_operations import get_acc, get_preds, get_preds_softmax
28
+ from .memory_operations import optimize_labels
29
+ from .visualizations import (
30
+ draw_neural_web,
31
+ update_neural_web_for_fit,
32
+ plot_evaluate,
33
+ update_neuron_history,
34
+ initialize_visualization_for_fit,
35
+ update_weight_visualization_for_fit,
36
+ update_decision_boundary_for_fit,
37
+ update_validation_history_for_fit,
38
+ display_visualization_for_fit,
39
+ display_visualizations_for_learner,
40
+ update_history_plots_for_learner,
41
+ initialize_visualization_for_learner,
42
+ update_neuron_history_for_learner,
43
+ show
44
+ )
45
+
46
### GLOBAL VARIABLES ###
# Pre-fetched tqdm bar-format strings: slot 0 is the standard fitting/testing
# bar, slot 1 the learner-specific bar.
bar_format_normal, bar_format_learner = loading_bars()[0], loading_bars()[1]
49
+
50
+ # BUILD -----
51
+
52
def fit(
    x_train,
    y_train,
    val=False,
    val_count=None,
    activation_potentiation=['linear'],
    x_val=None,
    y_val=None,
    show_training=None,
    interval=100,
    LTD=0,
    decision_boundary_status=True,
    train_bar=True,
    auto_normalization=True,
    neurons_history=False,
    dtype=np.float32
):
    """
    Creates a PLAN model fitted to the given data.

    Args:
        x_train (list | numpy.ndarray): Input samples.
        y_train (list | numpy.ndarray): Target labels, one-hot encoded.
        val (bool): Run validation during training. Default: False.
        val_count (int | None): Controls how often validation runs. Default: 10.
        activation_potentiation (list): Activation function names for deeper
            PLAN networks (see plan.activations_list()). Default: ['linear'].
        x_val (list | numpy.ndarray | None): Validation inputs. Defaults to x_train.
        y_val (list | numpy.ndarray | None): One-hot validation labels. Defaults to y_train.
        show_training (bool | None): Visualize the training process. Default: None.
        interval (float | int): Frame delay in ms for the training report
            (33.33 = 30 FPS, 16.67 = 60 FPS). Default: 100.
        LTD (int): Long Term Depression hyperparameter. Default: 0.
        decision_boundary_status (bool): Also draw decision boundaries when
            training visualization is enabled. Default: True.
        train_bar (bool): Show a training progress bar. Default: True.
        auto_normalization (bool): Normalize STPW before accumulation; may
            affect training time and model quality. Default: True.
        neurons_history (bool): Visualize per-neuron changes across the
            Cumulative Learning stages. Default: False.
        dtype (numpy.dtype): Array dtype; np.float32 by default
            (np.float64 / np.float16 also possible).

    Returns:
        numpy.ndarray: The trained, normalized weight matrix (LTPW).

    Raises:
        ValueError: If x_train and y_train have different lengths.
    """

    # Pre-checks
    x_train = x_train.astype(dtype, copy=False)

    if train_bar and val:
        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
    elif train_bar and val == False:
        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)

    if len(x_train) != len(y_train):
        raise ValueError("x_train and y_train must have the same length.")

    if val and (x_val is None and y_val is None):
        x_val, y_val = x_train, y_train
    elif val and (x_val is not None and y_val is not None):
        x_val = x_val.astype(dtype, copy=False)
        y_val = y_val.astype(dtype, copy=False)

    val_list = [] if val else None
    val_count = val_count or 10
    # FIX: defined up front so the neurons_history visualization can never hit
    # an unbound val_acc before the first validation pass has run.
    val_acc = None

    # Defining weights
    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)   # STPW = SHORT TERM POTENTIATION WEIGHT
    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)  # LTPW = LONG TERM POTENTIATION WEIGHT

    # Initialize visualization
    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)

    # FIX: decode the one-hot labels once, not on every sample (the original
    # re-ran decode_one_hot(y_train) inside the loop — O(n^2) total work).
    y_decoded = decode_one_hot(y_train)

    # Training process
    for index, inp in enumerate(x_train):
        inp = np.array(inp, copy=False).ravel()

        # Weight updates: short-term potentiation for this sample, accumulated
        # (optionally normalized) into the long-term matrix.
        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW

        if val and index != 0:
            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
                val_list.append(val_acc)

                # Visualization updates
                if show_training:
                    update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
                    if decision_boundary_status:
                        update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
                    update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
                    update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
                if neurons_history:
                    update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)

        if train_bar:
            train_progress.update(1)

        # Reset the short-term matrix for the next sample.
        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)

    # Finalize visualization
    if show_training:
        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
        show()

    if neurons_history:
        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
        show()

    return normalization(LTPW, dtype=dtype)
174
+
175
+
176
def _learner_finish(x_train, y_train, best_weights, final_activations, best_model,
                    best_acc, best_loss, loss, batch_size, data, viz_objects,
                    test_loss, interval, dtype, activations_label=None):
    # Shared exit path for learner(): evaluates the best genome on the full
    # train set, prints the closing report, shows the final visualizations and
    # packs the public result tuple. activations_label overrides what is
    # printed for "Activations:" (the final path reports act_pop[-1]).
    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                           activation_potentiation=final_activations, dtype=dtype)

    if loss == 'categorical_crossentropy':
        train_loss = categorical_crossentropy(y_true_batch=y_train,
                                              y_pred_batch=train_model[get_preds_softmax()])
    else:
        train_loss = binary_crossentropy(y_true_batch=y_train,
                                         y_pred_batch=train_model[get_preds_softmax()])

    print('\nActivations: ', final_activations if activations_label is None else activations_label)
    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
    if data == 'Test':
        print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
        print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

    # Display final visualizations
    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                       test_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations


def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
            neural_web_history=False, show_current_activations=False, auto_normalization=True,
            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
            interval=33.33, target_acc=None, target_loss=None, except_this=None,
            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=np.float32):
    """
    Optimizes the activation functions of a PLAN network with the PLANEAT
    genetic algorithm: each generation fits candidate activation sets, scores
    them on (batched) test or train data, and evolves the population.

    Args:
        x_train (array-like): Training input data.
        y_train (array-like): One-hot encoded training labels.
        optimizer (callable): PLANEAT evolve wrapper, e.g.
            ``lambda *args, **kwargs: planeat.evolve(*args, **kwargs)``.
        x_test (array-like, optional): Test inputs; defaults to x_train.
        y_test (array-like, optional): Test labels; defaults to y_train.
        strategy (str): 'accuracy', 'f1', 'precision' or 'recall'. Default 'accuracy'.
        gen (int, optional): Generation count; defaults to the number of
            candidate activation functions.
        batch_size (float): Fraction of the test set sampled per evaluation
            (1 = 100%). Default 1.
        neural_web_history (bool): Draw the neural web history. Default False.
        show_current_activations (bool): Print the currently best activation
            list whenever it improves. Default False.
        auto_normalization (bool): Forwarded to fit(). Default True.
        neurons_history (bool): Visualize neuron changes. Default False.
        early_stop (bool): Stop when best accuracy does not improve between
            two consecutive generations. Default False.
        loss (str): 'categorical_crossentropy' or 'binary_crossentropy'
            (monitoring only). Default 'categorical_crossentropy'.
        show_history (bool): Plot training history live. Default False.
        interval (float): Visualization frame delay in ms. Default 33.33.
        target_acc (float, optional): Stop early once reached.
        target_loss (float, optional): Stop early once reached.
        except_this (list, optional): Activations to exclude.
        only_this (list, optional): Restrict search to these activations.
        start_this_act (list, optional): Resume from this activation list.
        start_this_W (numpy.ndarray, optional): Resume from this weight matrix.
        target_fitness (str): 'max' (learning) or 'min' (unlearning). Default 'max'.
        dtype (numpy.dtype): Array dtype; np.float32 by default.

    Returns:
        tuple: (best weight matrix, predictions of best model, best accuracy,
        final activation list).

    Raises:
        ValueError: On an unknown strategy or an odd-sized activation population.
    """

    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)

    activation_potentiation = all_activations()

    # Pre-checks
    x_train = x_train.astype(dtype, copy=False)
    y_train = optimize_labels(y_train, cuda=False)

    if x_test is None and y_test is None:
        x_test = x_train
        y_test = y_train
        data = 'Train'
    else:
        x_test = x_test.astype(dtype, copy=False)
        y_test = optimize_labels(y_test, cuda=False)
        data = 'Test'

    # Filter activation functions
    if only_this is not None:
        activation_potentiation = only_this
    if except_this is not None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
    if gen is None:
        gen = len(activation_potentiation)

    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")

    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
    # FIX: was `len(...) + 1 % 2 != 0`, which parses as `len(...) + (1 % 2) != 0`
    # and therefore always raised whenever start_this_act was supplied.
    if start_this_act is not None and (len(activation_potentiation) + 1) % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")

    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

    # Progress bar width depends on whether batch statistics are shown.
    if batch_size == 1:
        ncols = 76
    else:
        ncols = 89

    # Initialize variables
    act_pop = []
    weight_pop = []

    if start_this_act is None and start_this_W is None:
        best_acc = 0
    else:
        # Resume: seed the population with the provided genome and score it.
        act_pop.append(start_this_act)
        weight_pop.append(start_this_W)

        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)

        if loss == 'categorical_crossentropy':
            test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
        else:
            test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])

        best_acc = model[get_acc()]
        best_loss = test_loss

    best_acc_per_gen_list = []
    postfix_dict = {}
    loss_list = []
    target_pop = []

    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)

    for i in range(gen):
        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
        progress.set_postfix(postfix_dict)

        progress.n = 0
        progress.last_print_n = 0
        progress.update(0)

        for j in range(len(activation_potentiation)):

            x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)

            if i == 0:
                # First generation: grow the population one activation at a time.
                act_pop.append(activation_potentiation[j])
                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
                weight_pop.append(W)

            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
            acc = model[get_acc()]

            if strategy == 'accuracy': target_pop.append(acc)

            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
                precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])

                if strategy == 'precision':
                    target_pop.append(precision_score)
                    if i == 0 and j == 0:
                        best_precision = precision_score

                if strategy == 'recall':
                    target_pop.append(recall_score)
                    if i == 0 and j == 0:
                        best_recall = recall_score

                if strategy == 'f1':
                    target_pop.append(f1_score)
                    if i == 0 and j == 0:
                        best_f1 = f1_score

            if ((strategy == 'accuracy' and acc >= best_acc) or
                (strategy == 'f1' and f1_score >= best_f1) or
                (strategy == 'precision' and precision_score >= best_precision) or
                (strategy == 'recall' and recall_score >= best_recall)):

                best_acc = acc
                best_weights = weight_pop[j]
                final_activations = act_pop[j]
                best_model = model

                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

                if batch_size == 1:
                    postfix_dict[f"{data} Accuracy"] = best_acc
                else:
                    postfix_dict[f"{data} Batch Accuracy"] = acc
                progress.set_postfix(postfix_dict)

                if show_current_activations:
                    print(f", Current Activations={final_activations}", end='')

                if loss == 'categorical_crossentropy':
                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
                else:
                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])

                if batch_size == 1:
                    postfix_dict[f"{data} Loss"] = test_loss
                    best_loss = test_loss
                else:
                    postfix_dict[f"{data} Batch Loss"] = test_loss
                progress.set_postfix(postfix_dict)
                best_loss = test_loss

                # Update visualizations during training
                if show_history:
                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
                    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)

                if neurons_history:
                    viz_objects['neurons']['artists'] = (
                        update_neuron_history_for_learner(np.copy(best_weights), viz_objects['neurons']['ax'],
                                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                                                          y_train[0], viz_objects['neurons']['artists'],
                                                          data=data, fig1=viz_objects['neurons']['fig'],
                                                          acc=best_acc, loss=test_loss)
                    )

                if neural_web_history:
                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
                                                             G=viz_objects['web']['G'], return_objs=True)
                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
                    viz_objects['web']['artists'].append(art5_list)

            # Check target accuracy
            if target_acc is not None and best_acc >= target_acc:
                progress.close()
                return _learner_finish(x_train, y_train, best_weights, final_activations, best_model,
                                       best_acc, best_loss, loss, batch_size, data, viz_objects,
                                       test_loss, interval, dtype)

            # Check target loss
            if target_loss is not None and best_loss <= target_loss:
                progress.close()
                return _learner_finish(x_train, y_train, best_weights, final_activations, best_model,
                                       best_acc, best_loss, loss, batch_size, data, viz_objects,
                                       test_loss, interval, dtype)

            progress.update(1)

        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)

        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
        target_pop = []

        # Early stopping check
        if early_stop == True and i > 0:
            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                progress.close()
                return _learner_finish(x_train, y_train, best_weights, final_activations, best_model,
                                       best_acc, best_loss, loss, batch_size, data, viz_objects,
                                       test_loss, interval, dtype)

    # Final evaluation
    progress.close()
    return _learner_finish(x_train, y_train, best_weights, final_activations, best_model,
                           best_acc, best_loss, loss, batch_size, data, viz_objects,
                           test_loss, interval, dtype, activations_label=act_pop[-1])
530
+
531
+
532
+
533
def feed_forward(
    Input,               # list[num]: Input data.
    w,                   # num: Weight matrix of the neural network.
    is_training,         # bool: True while fitting, False at inference.
    activation_potentiation,  # list: Activation names applied to the input.
    Class='?',           # int: Target class index when training.
    LTD=0                # int: Number of Long Term Depression steps.
) -> tuple:
    """
    Applies the activation-based feature extraction of PLAN to one sample.

    Args:
        Input (num): Input data.
        w (num): Weight matrix of the neural network.
        is_training (bool): True during training, False at inference.
        Class (int): Target class index while training; ignored otherwise.
        LTD (int): How many random depression vectors to subtract while training.

    Returns:
        tuple: The updated weight matrix (training) or the neural layer
        output vector (inference).
    """

    # Activation pass replaces the raw input.
    Input = apply_activation(Input, activation_potentiation)

    # NOTE: `== True` kept deliberately — matches the original truthiness check.
    if is_training == True:
        # Long Term Depression: subtract LTD random vectors from the features.
        for _ in range(LTD):
            Input = Input - np.random.rand(*Input.shape)

        # Write the (possibly depressed) features into the class row.
        w[Class, :] = Input
        return w

    # Inference: plain linear read-out against the weight matrix.
    return np.dot(w, Input)
577
+
578
+
579
def evaluate(
    x_test,                              # NumPy array: Test input data.
    y_test,                              # NumPy array: Test labels (one-hot).
    W,                                   # NumPy array: Weight matrix.
    activation_potentiation=['linear'],  # List of activation functions.
    loading_bar_status=True,             # Optionally show loading bar.
    show_metrics=None,                   # Optionally show metrics.
    dtype=np.float32
) -> tuple:
    """
    Evaluates the neural network model using the given test data.

    Args:
        x_test (np.ndarray): Test input data.
        y_test (np.ndarray): Test labels, one-hot encoded.
        W (np.ndarray): Neural network weight matrix.
        activation_potentiation (list): List of activation functions.
        loading_bar_status (bool): Show a loading bar. Default: True.
        show_metrics (bool): Plot evaluation metrics. Default: None.
        dtype (numpy.dtype): Array dtype; np.float32 by default.

    Returns:
        tuple: (W, predicted classes, final accuracy, None, None,
        softmax probabilities per sample).
    """
    # Pre-checks
    x_test = x_test.astype(dtype, copy=False)

    # Store one-hot labels in the smallest unsigned dtype the class count allows.
    if len(y_test[0]) < 256:
        if y_test.dtype != np.uint8:
            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
    elif len(y_test[0]) <= 32767:
        if y_test.dtype != np.uint16:
            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
    else:
        if y_test.dtype != np.uint32:
            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)

    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=np.float32)
    real_classes = np.empty(len(x_test), dtype=np.int32)
    predict_classes = np.empty(len(x_test), dtype=np.int32)

    true_predict = 0
    acc_list = np.empty(len(x_test), dtype=np.float32)

    if loading_bar_status:
        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)

    # FIX: copy W once instead of per sample — inference (is_training=False)
    # only reads the weights, so a single defensive snapshot is enough.
    W_snapshot = np.copy(W)

    for inpIndex in range(len(x_test)):
        Input = x_test[inpIndex].ravel()

        neural_layer = feed_forward(Input, W_snapshot, is_training=False, Class='?', activation_potentiation=activation_potentiation)

        predict_probabilitys[inpIndex] = Softmax(neural_layer)

        RealOutput = np.argmax(y_test[inpIndex])
        real_classes[inpIndex] = RealOutput
        PredictedOutput = np.argmax(neural_layer)
        predict_classes[inpIndex] = PredictedOutput

        if RealOutput == PredictedOutput:
            true_predict += 1

        # Running accuracy over the samples seen so far.
        acc = true_predict / (inpIndex + 1)
        acc_list[inpIndex] = acc

        if loading_bar_status:
            loading_bar.update(1)
            loading_bar.set_postfix({"Test Accuracy": acc})

    if loading_bar_status:
        loading_bar.close()

    if show_metrics:
        # Plot the evaluation metrics
        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=np.copy(W), activation_potentiation=activation_potentiation)

    # FIX: guard the empty-test-set case instead of crashing on acc_list[-1].
    final_acc = acc_list[-1] if acc_list.size else 0.0

    return W, predict_classes, final_acc, None, None, predict_probabilitys