pyerualjetwork 4.3.1__py3-none-any.whl → 4.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {pyerualjetwork-4.3.1.dist-info → pyerualjetwork-4.3.2.0.dist-info}/METADATA +3 -2
  2. pyerualjetwork-4.3.2.0.dist-info/RECORD +24 -0
  3. pyerualjetwork-4.3.2.0.dist-info/top_level.txt +1 -0
  4. {pyerualjetwork → pyerualjetwork-afterburner}/__init__.py +1 -1
  5. pyerualjetwork-afterburner/activation_functions.py +291 -0
  6. pyerualjetwork-afterburner/activation_functions_cuda.py +290 -0
  7. {pyerualjetwork → pyerualjetwork-afterburner}/model_operations.py +14 -14
  8. {pyerualjetwork → pyerualjetwork-afterburner}/model_operations_cuda.py +16 -17
  9. {pyerualjetwork → pyerualjetwork-afterburner}/plan.py +44 -246
  10. {pyerualjetwork → pyerualjetwork-afterburner}/plan_cuda.py +37 -256
  11. {pyerualjetwork → pyerualjetwork-afterburner}/planeat.py +11 -43
  12. {pyerualjetwork → pyerualjetwork-afterburner}/planeat_cuda.py +8 -44
  13. pyerualjetwork/activation_functions.py +0 -343
  14. pyerualjetwork/activation_functions_cuda.py +0 -340
  15. pyerualjetwork-4.3.1.dist-info/RECORD +0 -24
  16. pyerualjetwork-4.3.1.dist-info/top_level.txt +0 -1
  17. {pyerualjetwork-4.3.1.dist-info → pyerualjetwork-4.3.2.0.dist-info}/WHEEL +0 -0
  18. {pyerualjetwork → pyerualjetwork-afterburner}/data_operations.py +0 -0
  19. {pyerualjetwork → pyerualjetwork-afterburner}/data_operations_cuda.py +0 -0
  20. {pyerualjetwork → pyerualjetwork-afterburner}/help.py +0 -0
  21. {pyerualjetwork → pyerualjetwork-afterburner}/loss_functions.py +0 -0
  22. {pyerualjetwork → pyerualjetwork-afterburner}/loss_functions_cuda.py +0 -0
  23. {pyerualjetwork → pyerualjetwork-afterburner}/memory_operations.py +0 -0
  24. {pyerualjetwork → pyerualjetwork-afterburner}/metrics.py +0 -0
  25. {pyerualjetwork → pyerualjetwork-afterburner}/metrics_cuda.py +0 -0
  26. {pyerualjetwork → pyerualjetwork-afterburner}/ui.py +0 -0
  27. {pyerualjetwork → pyerualjetwork-afterburner}/visualizations.py +0 -0
  28. {pyerualjetwork → pyerualjetwork-afterburner}/visualizations_cuda.py +0 -0
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
-import math
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations_cuda import normalization, decode_one_hot, batcher
+from .data_operations_cuda import normalization
 from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
-from .activation_functions_cuda import apply_activation, Softmax, all_activations
+from .activation_functions_cuda import apply_activation, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
 from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
 from .visualizations_cuda import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner,
-    show
+    update_neuron_history_for_learner
 )
 
 ### GLOBAL VARIABLES ###
@@ -52,144 +42,42 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-    x_val=None,
-    y_val=None,
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
-    dtype=cp.float32,
-    memory='gpu'
+    W=None,
+    dtype=cp.float32
 ):
     """
-    Creates a model to fitting data.
+    Creates a model to fitting data.,
 
     fit Args:
 
-    x_train (list[num]): List or numarray of input data.
+    x_train (aray-like[cupy]): List or cupy array of input data.
 
-    y_train (list[num]): List or numarray of target labels. (one hot encoded)
-
-    val (None or True): validation in training process ? None or True default: None (optional)
-
-    val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+    y_train (aray-like[cupy]): List or cupy array of target labels. (one hot encoded)
 
     activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-    x_val (list[num]): List of validation data. default: x_train (optional)
-
-    y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-    show_training (bool, str): True or None default: None (optional)
-
-    LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-    interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-    decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-    train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-    auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
-
-    neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
-
-    dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
-    memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
+    W (cupy.ndarray): If you want to re-continue or update model
+
+    dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-    numpyarray([num]): (Weight matrix).
+    cupyarray: (Weight matrix).
     """
-    # Pre-checks
+    # Pre-check
 
-    if train_bar and val:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
-    elif train_bar and val == False:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
+    if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
 
-    if len(x_train) != len(y_train):
-        raise ValueError("x_train and y_train must have the same length.")
-
-    if val and (x_val is None or y_val is None):
-        x_val, y_val = x_train, y_train
-
-    if memory == 'gpu':
-        x_train = transfer_to_gpu(x_train, dtype=dtype)
-        y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
+    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-        if val:
-            x_val = transfer_to_gpu(x_val, dtype=dtype)
-            y_val = transfer_to_gpu(y_val, dtype=y_train.dtype)
-
-    elif memory == 'cpu':
-        x_train = transfer_to_cpu(x_train, dtype=dtype)
-        y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
-
-        if val:
-            x_val = transfer_to_cpu(x_val, dtype=dtype)
-            y_val = transfer_to_cpu(y_val, dtype=y_train.dtype)
-
-    else:
-        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TERM POTENTIATION WEIGHT
-    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TERM POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = transfer_to_gpu(inp, dtype=dtype).ravel()
-        y_decoded = decode_one_hot(cp.array(y_train[index], copy=False, dtype=y_train.dtype))
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW, memory=memory)[get_acc()]
-                val_list.append(val_acc)
-
-        # Visualization updates
-        if show_training:
-            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-            if decision_boundary_status:
-                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-        if neurons_history:
-            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
-
-    if neurons_history:
-        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-        show()
+    x_train = apply_activation(x_train, activation_potentiation)
+    LTPW += cp.array(y_train, dtype=optimize_labels(y_train, cuda=True).dtype).T @ x_train
 
     return normalization(LTPW, dtype=dtype)
 
 
 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False, auto_normalization=True,
+            neural_web_history=False, show_current_activations=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
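In this hunk the entire per-sample STPW/LTPW training loop, its validation hooks, and the visualization plumbing collapse into a single matrix product, and `learner` drops its `auto_normalization` pass-through accordingly. For orientation, here is a minimal self-contained sketch of the new computation; it is my reading of the diff rather than package code, with NumPy standing in for CuPy and the identity function standing in for `apply_activation`:

```python
import numpy as np

def fit_sketch(x_train, y_train, W=None, dtype=np.float32):
    # Stand-in for apply_activation(x_train, ['linear']): identity.
    x = x_train.astype(dtype)
    # The new W parameter: start from zeros or resume from an existing matrix.
    if W is None:
        W = np.zeros((y_train.shape[1], x.shape[1]), dtype=dtype)
    # The whole training loop collapses to one matmul: W += y_one_hot.T @ act(X).
    W += y_train.astype(dtype).T @ x
    return W  # the real fit returns normalization(W, dtype=dtype)

X = np.random.rand(6, 4).astype(np.float32)
Y = np.eye(3, dtype=np.float32)[[0, 1, 2, 0, 1, 2]]  # one-hot labels
W = fit_sketch(X, Y)
W = fit_sketch(X, Y, W=W)  # a second call updates the same model
print(W.shape)  # (3, 4)
```

The new `W` parameter is what replaces the removed resume machinery: passing the previous weight matrix back in continues accumulation instead of restarting from zeros, matching the docstring's "re-continue or update model".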
@@ -234,8 +122,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
 
-    auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
-
     show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
     show_history (bool, optional): If True, displays the training history after optimization. Default is False.
@@ -339,10 +225,10 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
             if fit_start is True and i == 0:
                 act_pop.append(activation_potentiation[j])
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
                 weight_pop.append(W)
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j])
             acc = model[get_acc()]
 
             if strategy == 'accuracy': target_pop.append(acc)
@@ -376,6 +262,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 best_acc = acc
                 best_weights = cp.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model
 
                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
@@ -427,7 +314,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                   activation_potentiation=final_activations, dtype=dtype)
+                                   activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -448,8 +335,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target loss
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                   activation_potentiation=final_activations, dtype=dtype)
+            train_model = evaluate(x_train, y_train, W=best_weights,
+                                   activation_potentiation=final_activations)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -459,7 +346,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Train Accuracy: ', train_model[get_acc()])
             print(f'Train Loss: ', train_loss, '\n')
 
             # Display final visualizations
@@ -479,8 +366,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                       activation_potentiation=final_activations, dtype=dtype, memory=memory)
+                train_model = evaluate(x_train, y_train, W=best_weights,
+                                       activation_potentiation=final_activations)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -500,8 +387,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
     # Final evaluation
    progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                           activation_potentiation=final_activations, dtype=dtype, memory=memory)
+    train_model = evaluate(x_train, y_train, W=best_weights,
+                           activation_potentiation=final_activations)
 
    if loss == 'categorical_crossentropy':
        train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
@@ -509,70 +396,18 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
    print('\nActivations: ', final_activations)
-    print(f'Train Accuracy:', train_model[get_acc()])
-    print(f'Train Loss: ', train_loss, '\n')
+    print(f'Train Accuracy: ', train_model[get_acc()])
+    print(f'Train Loss : ', train_loss, '\n')
 
    # Display final visualizations
    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations
 
-
-
-def feed_forward(
-    Input,               # list[num]: Input data.
-    w,                   # num: Weight matrix of the neural network.
-    is_training,         # bool: Flag indicating if the function is called during training (True or False).
-    activation_potentiation,
-    Class='?',           # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-    LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = cp.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = cp.dot(w, Input)
-
-        return neural_layer
-
-
 def evaluate(
    x_test,
    y_test,
    W,
-    activation_potentiation=['linear'],
-    loading_bar_status=True,
-    show_metrics=False,
-    dtype=cp.float32,
-    memory='gpu'
+    activation_potentiation=['linear']
 ) -> tuple:
    """
    Evaluates the neural network model using the given test data.
@@ -582,70 +417,16 @@ def evaluate(
 
    y_test (cp.ndarray): Test labels (one-hot encoded).
 
-    W (list[cp.ndarray]): Neural net weight matrix.
+    W (cp.ndarray): Neural net weight matrix.
 
    activation_potentiation (list): Activation list. Default = ['linear'].
 
-    loading_bar_status (bool): Loading bar (optional). Default = True.
-
-    show_metrics (bool): Visualize metrics ? (optional). Default = False.
-
-    dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
-    memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
-
    Returns:
    tuple: Model (list).
    """
 
-    if memory == 'gpu':
-        x_test = transfer_to_gpu(x_test, dtype=dtype)
-        y_test = transfer_to_gpu(y_test, dtype=y_test.dtype)
-
-    elif memory == 'cpu':
-        x_test = transfer_to_cpu(x_test, dtype=dtype)
-        y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)
-
-    else:
-        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
-
-    predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = cp.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = cp.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = cp.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = transfer_to_gpu(x_test[inpIndex], dtype=dtype).ravel()
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = decode_one_hot(transfer_to_gpu(y_test[inpIndex], dtype=y_test[inpIndex].dtype))
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = cp.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()
-
-    if show_metrics:
-        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=cp.copy(W), activation_potentiation=activation_potentiation)
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    softmax_preds = cp.exp(result) / cp.sum(cp.exp(result), axis=1, keepdims=True); accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
 
-    return W, predict_classes, acc_list[-1], None, None, predict_probabilitys
+    return W, None, accuracy, None, None, softmax_preds
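`evaluate` gets the same treatment: the memory transfers, loading bar, and per-sample loop are replaced by one matmul, an exp-normalize softmax, and a vectorized argmax comparison. A sketch of the equivalent arithmetic follows (my own, with NumPy in place of CuPy and the identity activation assumed); note that the plain `cp.exp(result)` form in the diff can overflow to inf for large logits, which the max-subtracted variant used below avoids while producing the same probabilities:

```python
import numpy as np

def evaluate_sketch(x_test, y_test, W):
    logits = x_test @ W.T                                  # forward pass: one matmul
    shifted = logits - logits.max(axis=1, keepdims=True)   # numerically stable softmax
    softmax_preds = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    accuracy = (logits.argmax(axis=1) == y_test.argmax(axis=1)).mean()
    # Same 6-slot model tuple layout as the diff, with unused slots left as None.
    return W, None, accuracy, None, None, softmax_preds

X = np.random.rand(6, 4).astype(np.float32)
Y = np.eye(3, dtype=np.float32)[[0, 1, 2, 0, 1, 2]]
W = np.random.rand(3, 4).astype(np.float32)
_, _, acc, _, _, probs = evaluate_sketch(X, Y, W)
print(acc, probs.shape)  # accuracy in [0, 1], probs of shape (6, 3)
```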
@@ -17,7 +17,6 @@ import random
 import math
 
 ### LIBRARY IMPORTS ###
-from .plan import feed_forward
 from .data_operations import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
@@ -297,6 +296,7 @@ def evolver(weights,
    mutated_W = np.copy(bad_weights)
    mutated_act = bad_activations.copy()
 
+
    for i in range(len(bad_weights)):
 
        if policy == 'aggressive':
@@ -399,7 +399,7 @@ def evolver(weights,
    return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=np.float32):
+def evaluate(x_population, weights, activation_potentiations):
    """
    Evaluates the performance of a population of genomes, applying different activation functions
    and weights depending on whether reinforcement learning mode is enabled or not.
@@ -412,62 +412,30 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dty
        activation_potentiations (list or str): A list where each entry represents an activation function
                                                or a potentiation strategy applied to each genome. If only one
                                                activation function is used, this can be a single string.
-        rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
-                                  Default is False.
-
-        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
    Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
              activation function and weights.
 
-    Notes:
-        - If `rl_mode` is True:
-            - Accepts x_population is a single genom
-            - The inputs are flattened, and the activation function is applied across the single genom.
-
-        - If `rl_mode` is False:
-            - Accepts x_population is a list of genomes
-            - Each genome is processed individually, and the results are stored in the `outputs` list.
-
-        - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
-
    Example:
        ```python
-        outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
+        outputs = evaluate(x_population, weights, activation_potentiations)
        ```
 
        - The function returns a list of outputs after processing the population, where each element corresponds to
          the output for each genome in `x_population`.
-    """
-
-    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
-
-    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
+    """
    ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-    if rl_mode == True:
-        Input = np.array(x_population, copy=False, dtype=dtype)
-        Input = Input.ravel()
-
-        if isinstance(activation_potentiations, str):
-            activation_potentiations = [activation_potentiations]
-
-        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
-
-    else:
-        outputs = [0] * len(x_population)
-        for i, genome in enumerate(x_population):
-
-            Input = np.array(genome, copy=False)
-            Input = Input.ravel()
 
-            if isinstance(activation_potentiations[i], str):
-                activation_potentiations[i] = [activation_potentiations[i]]
+    if isinstance(activation_potentiations, str):
+        activation_potentiations = [activation_potentiations]
+    else:
+        activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-            outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
+    x_population = apply_activation(x_population, activation_potentiations)
+    result = x_population @ weights.T
 
-    return outputs
+    return result
 
 
 def cross_over(first_parent_W,
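Both planeat modules lose `rl_mode`, the `dtype` parameter, and the per-genome `feed_forward` loop in favor of one population-wide matmul; the planeat_cuda hunks below are the same change with CuPy arrays. Here is a sketch of the rewritten flow (mine, not package code, NumPy with identity activations), with one caveat worth noting: `x_population @ weights.T` treats `weights` as a single 2-D matrix, rather than pairing `weights[i]` with genome `i` as the old loop did:

```python
import numpy as np

def evaluate_sketch(x_population, weights, activation_potentiations):
    # Coerce activation entries into lists, mirroring the new code.
    if isinstance(activation_potentiations, str):
        activation_potentiations = [activation_potentiations]
    else:
        activation_potentiations = [a if isinstance(a, list) else [a]
                                    for a in activation_potentiations]
    # Stand-in for apply_activation(x_population, activation_potentiations).
    x = np.asarray(x_population, dtype=np.float32)
    return x @ weights.T  # one matmul scores the whole population

pop = np.random.rand(8, 5).astype(np.float32)  # 8 genome inputs, 5 features each
W = np.random.rand(3, 5).astype(np.float32)    # one 2-D weight matrix
print(evaluate_sketch(pop, W, 'linear').shape)  # (8, 3)
```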
@@ -19,7 +19,6 @@ import math
 
 
 ### LIBRARY IMPORTS ###
-from .plan_cuda import feed_forward
 from .data_operations_cuda import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
@@ -399,7 +398,7 @@ def evolver(weights,
    return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=cp.float32):
+def evaluate(x_population, weights, activation_potentiations):
    """
    Evaluates the performance of a population of genomes, applying different activation functions
    and weights depending on whether reinforcement learning mode is enabled or not.
@@ -414,64 +413,29 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dty
        activation_potentiations (list or str): A list where each entry represents an activation function
                                                or a potentiation strategy applied to each genome. If only one
                                                activation function is used, this can be a single string.
-
-        rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
-                                  Default is False.
-
-        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
    Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
              activation function and weights.
 
-    Notes:
-        - If `rl_mode` is True:
-            - Accepts x_population is a single genom
-            - The inputs are flattened, and the activation function is applied across the single genom.
-
-        - If `rl_mode` is False:
-            - Accepts x_population is a list of genomes
-            - Each genome is processed individually, and the results are stored in the `outputs` list.
-
-        - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
-
    Example:
        ```python
-        outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
+        outputs = evaluate(x_population, weights, activation_potentiations)
        ```
 
        - The function returns a list of outputs after processing the population, where each element corresponds to
          the output for each genome in `x_population`.
    """
-
-    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
-
-    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
    ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-    if rl_mode == True:
-        Input = cp.array(x_population, dtype=dtype, copy=False)
-        Input = Input.ravel()
-
-        if isinstance(activation_potentiations, str):
-            activation_potentiations = [activation_potentiations]
-
-        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
-
+    if isinstance(activation_potentiations, str):
+        activation_potentiations = [activation_potentiations]
    else:
-        outputs = [0] * len(x_population)
-        for i, genome in enumerate(x_population):
-
-            Input = cp.array(genome)
-            Input = Input.ravel()
-
-            if isinstance(activation_potentiations[i], str):
-                activation_potentiations[i] = [activation_potentiations[i]]
+        activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-            outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
+    x_population = apply_activation(x_population, activation_potentiations)
+    result = x_population @ weights.T
 
-    return outputs
+    return result
 
 
 def cross_over(first_parent_W,