pyerualjetwork 4.3.8.dev14__py3-none-any.whl → 4.3.9b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. pyerualjetwork/__init__.py +1 -1
  2. pyerualjetwork/activation_functions.py +2 -2
  3. pyerualjetwork/activation_functions_cuda.py +63 -114
  4. pyerualjetwork/data_operations_cuda.py +1 -1
  5. pyerualjetwork/model_operations.py +14 -14
  6. pyerualjetwork/model_operations_cuda.py +16 -17
  7. pyerualjetwork/plan.py +87 -268
  8. pyerualjetwork/plan_cuda.py +82 -276
  9. pyerualjetwork/planeat.py +12 -44
  10. pyerualjetwork/planeat_cuda.py +9 -45
  11. pyerualjetwork/visualizations.py +29 -26
  12. pyerualjetwork/visualizations_cuda.py +19 -20
  13. {pyerualjetwork-4.3.8.dev14.dist-info → pyerualjetwork-4.3.9b0.dist-info}/METADATA +2 -19
  14. pyerualjetwork-4.3.9b0.dist-info/RECORD +24 -0
  15. pyerualjetwork-4.3.9b0.dist-info/top_level.txt +1 -0
  16. pyerualjetwork-4.3.8.dev14.dist-info/RECORD +0 -44
  17. pyerualjetwork-4.3.8.dev14.dist-info/top_level.txt +0 -2
  18. pyerualjetwork_afterburner/__init__.py +0 -11
  19. pyerualjetwork_afterburner/activation_functions.py +0 -290
  20. pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
  21. pyerualjetwork_afterburner/data_operations.py +0 -406
  22. pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
  23. pyerualjetwork_afterburner/help.py +0 -17
  24. pyerualjetwork_afterburner/loss_functions.py +0 -21
  25. pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
  26. pyerualjetwork_afterburner/memory_operations.py +0 -298
  27. pyerualjetwork_afterburner/metrics.py +0 -190
  28. pyerualjetwork_afterburner/metrics_cuda.py +0 -163
  29. pyerualjetwork_afterburner/model_operations.py +0 -408
  30. pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
  31. pyerualjetwork_afterburner/plan.py +0 -432
  32. pyerualjetwork_afterburner/plan_cuda.py +0 -441
  33. pyerualjetwork_afterburner/planeat.py +0 -793
  34. pyerualjetwork_afterburner/planeat_cuda.py +0 -840
  35. pyerualjetwork_afterburner/ui.py +0 -22
  36. pyerualjetwork_afterburner/visualizations.py +0 -823
  37. pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
  38. {pyerualjetwork-4.3.8.dev14.dist-info → pyerualjetwork-4.3.9b0.dist-info}/WHEEL +0 -0
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  """
 
  import cupy as cp
- import math
 
  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
- from .data_operations_cuda import normalization, decode_one_hot, batcher
+ from .data_operations_cuda import normalization
  from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
- from .activation_functions_cuda import apply_activation, Softmax, all_activations
+ from .activation_functions_cuda import apply_activation, all_activations
  from .metrics_cuda import metrics
  from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
  from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
  from .visualizations_cuda import (
      draw_neural_web,
-     update_neural_web_for_fit,
-     plot_evaluate,
-     update_neuron_history,
-     initialize_visualization_for_fit,
-     update_weight_visualization_for_fit,
-     update_decision_boundary_for_fit,
-     update_validation_history_for_fit,
-     display_visualization_for_fit,
      display_visualizations_for_learner,
      update_history_plots_for_learner,
      initialize_visualization_for_learner,
-     update_neuron_history_for_learner,
-     show
+     update_neuron_history_for_learner
  )
 
  ### GLOBAL VARIABLES ###
@@ -48,147 +38,52 @@ bar_format_normal = loading_bars()[0]
  bar_format_learner = loading_bars()[1]
 
  # BUILD -----
+
  def fit(
      x_train,
      y_train,
-     val=False,
-     val_count=None,
      activation_potentiation=['linear'],
-     x_val=None,
-     y_val=None,
-     show_training=None,
-     interval=100,
-     LTD=0,
-     decision_boundary_status=True,
-     train_bar=True,
-     auto_normalization=True,
-     neurons_history=False,
-     dtype=cp.float32,
-     memory='gpu'
+     W=None,
+     auto_normalization=False,
+     dtype=cp.float32
  ):
      """
-     Creates a model to fitting data.
+     Creates a model for fitting data.
 
      fit Args:
 
-         x_train (list[num]): List or numarray of input data.
-
-         y_train (list[num]): List or numarray of target labels. (one hot encoded)
-
-         val (None or True): validation in training process ? None or True default: None (optional)
+         x_train (array-like[cupy]): List or cupy array of input data.
 
-         val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+         y_train (array-like[cupy]): List or cupy array of target labels. (one hot encoded)
 
          activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-         x_val (list[num]): List of validation data. default: x_train (optional)
-
-         y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-         show_training (bool, str): True or None default: None (optional)
-
-         LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-         interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-         decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-         train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-         auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
-
-         neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
-
-         dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+         W (cupy.ndarray, optional): Weight matrix of a previously trained model, to continue or update its training. Default: None
+
+         auto_normalization (bool, optional): Normalization may solve overflow problems. Default: False
 
-         memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
+         dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
 
      Returns:
-         numpyarray([num]): (Weight matrix).
+         cupy.ndarray: Weight matrix.
      """
-     # Pre-checks
+     # Pre-check
 
-     if train_bar and val:
-         train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
-     elif train_bar and val == False:
-         train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
-
-     if len(x_train) != len(y_train):
-         raise ValueError("x_train and y_train must have the same length.")
-
-     if val and (x_val is None or y_val is None):
-         x_val, y_val = x_train, y_train
-
-     if memory == 'gpu':
-         x_train = transfer_to_gpu(x_train, dtype=dtype)
-         y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
-
-         if val:
-             x_val = transfer_to_gpu(x_val, dtype=dtype)
-             y_val = transfer_to_gpu(y_val, dtype=y_train.dtype)
-
-     elif memory == 'cpu':
-         x_train = transfer_to_cpu(x_train, dtype=dtype)
-         y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
+     if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
 
-         if val:
-             x_val = transfer_to_cpu(x_val, dtype=dtype)
-             y_val = transfer_to_cpu(y_val, dtype=y_train.dtype)
-
-     else:
-         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
+     weight = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-     val_list = [] if val else None
-     val_count = val_count or 10
-     # Defining weights
-     STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)   # STPW = SHORT TERM POTENTIATION WEIGHT
-     LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)  # LTPW = LONG TERM POTENTIATION WEIGHT
-     # Initialize visualization
-     vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-     # Training process
-     for index, inp in enumerate(x_train):
-         inp = transfer_to_gpu(inp, dtype=dtype).ravel()
-         y_decoded = decode_one_hot(cp.array(y_train[index], copy=False, dtype=y_train.dtype))
-         # Weight updates
-         STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-         LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-
-         if val and index != 0:
-             if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                 val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW, memory=memory)[get_acc()]
-                 val_list.append(val_acc)
-
-         # Visualization updates
-         if show_training:
-             update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-             if decision_boundary_status:
-                 update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-             update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-             update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-         if neurons_history:
-             update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-         if train_bar:
-             train_progress.update(1)
-
-         STPW = cp.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-     if show_training:
-         ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-         ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-         ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-         ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-         show()
+     if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+     elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+     else: raise ValueError('auto_normalization parameter must be True or False')
 
-     if neurons_history:
-         ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-         show()
+     weight += y_train.T @ x_train
 
-     return normalization(LTPW, dtype=dtype)
+     return normalization(weight, dtype=dtype)
 
 
- def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-             neural_web_history=False, show_current_activations=False, auto_normalization=True,
+ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+             neural_web_history=False, show_current_activations=False,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
              interval=33.33, target_acc=None, target_loss=None,
              start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
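
For orientation: the new `fit` is no longer an iterative training loop. It applies the activation stack to the whole input matrix once and accumulates per-class input sums with a single matrix product. A minimal sketch of that computation in plain CuPy, assuming a CUDA-capable environment; the max-abs scaling stands in for the library's `normalization` helper, and the toy arrays are illustrative only:

```python
import cupy as cp

def fit_sketch(x_train, y_train, W=None, dtype=cp.float32):
    """One-shot PLAN-style fit: x_train (n, features), y_train (n, classes) one-hot."""
    x_train = cp.asarray(x_train, dtype=dtype)
    y_train = cp.asarray(y_train, dtype=dtype)
    if W is None:
        W = cp.zeros((y_train.shape[1], x_train.shape[1]), dtype=dtype)
    W += y_train.T @ x_train      # each class row accumulates the inputs of its class
    return W / cp.max(cp.abs(W))  # stand-in for data_operations_cuda.normalization

x = cp.array([[0., 1.], [1., 0.], [1., 1.]])
y = cp.array([[1., 0.], [0., 1.], [0., 1.]])  # one-hot labels
print(fit_sketch(x, y))
```

Because the update is a pure accumulation, passing the returned matrix back in as `W` continues training on new data, which is what the new `W` parameter is for.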
@@ -231,9 +126,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-     early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
+     pop_size (int, optional): Population size of each generation. Default: count of activation functions
 
-     auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+     early_stop (bool, optional): If True, implements early stopping during training. (If train accuracy does not improve for two generations, learning stops.) Default is False.
 
      show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
@@ -242,7 +137,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
      loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
      interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
-
+
      target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
 
      target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
@@ -268,11 +163,15 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
      data = 'Train'
 
-     activation_potentiation = all_activations()
+     except_this = ['spiral', 'circular']
+     activation_potentiation = [item for item in all_activations() if item not in except_this]
      activation_potentiation_len = len(activation_potentiation)
 
+     if pop_size is None: pop_size = activation_potentiation_len
      y_train = optimize_labels(y_train, cuda=True)
 
+     if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be greater than or equal to {activation_potentiation_len}")
+
      if memory == 'gpu':
          x_train = transfer_to_gpu(x_train, dtype=dtype)
          y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
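
Given the new `pop_size` default and check above, a typical `learner` call wires in the genetic optimizer from the companion `planeat_cuda` module. A hedged usage sketch, assuming only the signatures shown in this diff; the data and hyperparameter values are illustrative:

```python
import cupy as cp
from pyerualjetwork import plan_cuda, planeat_cuda

# learner() drives the optimizer itself, so wrap evolver for deferred calls.
genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args, **kwargs)

x_train = cp.random.rand(200, 8).astype(cp.float32)
y_train = cp.eye(3, dtype=cp.float32)[cp.random.randint(0, 3, 200)]  # one-hot labels

W, preds, acc, activations = plan_cuda.learner(
    x_train, y_train,
    optimizer=genetic_optimizer,
    fit_start=True,   # generation 0: one candidate activation per genome
    gen=5,
    batch_size=0.5,   # each feedback batch samples 50% of the train set
)                     # pop_size defaults to the number of activation functions
```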
@@ -299,7 +198,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
      if batch_size == 1:
          ncols = 76
      else:
-         ncols = 89
+         ncols = 49
 
      # Initialize variables
      best_acc = 0
@@ -313,16 +212,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
      progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
 
-     if fit_start is False:
-         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)
+     if fit_start is False or pop_size > activation_potentiation_len:
+         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
 
          if start_this_act is not None and start_this_W is not None:
              weight_pop[0] = start_this_W
              act_pop[0] = start_this_act
 
      else:
-         weight_pop = []
-         act_pop = []
+         weight_pop = [0] * pop_size
+         act_pop = [0] * pop_size
 
      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -332,15 +231,19 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          progress.last_print_n = 0
          progress.update(0)
 
-         for j in range(activation_potentiation_len):
+         for j in range(pop_size):
 
              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
-             if fit_start is True and i == 0:
-                 act_pop.append(activation_potentiation[j])
-                 W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], auto_normalization=auto_normalization, train_bar=False, dtype=dtype, memory=memory)
-                 weight_pop.append(W)
-
-             model = evaluate(x_train_batch, y_train_batch, weight_pop[j], act_pop[j], loading_bar_status=False, dtype=dtype, memory=memory)
+
+             x_train_batch = cp.array(x_train_batch, dtype=dtype, copy=False)
+             y_train_batch = cp.array(y_train_batch, dtype=dtype, copy=False)
+
+             if fit_start is True and i == 0 and j < activation_potentiation_len:
+                 act_pop[j] = activation_potentiation[j]
+                 W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], dtype=dtype)
+                 weight_pop[j] = W
+
+             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
              acc = model[get_acc()]
 
              if strategy == 'accuracy': target_pop.append(acc)
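
The `j < activation_potentiation_len` guard above implements the generation-0 seeding: each of the first `activation_potentiation_len` genomes gets exactly one candidate activation and a weight matrix fitted under it, while any surplus genomes (when `pop_size` is larger) keep their `define_genomes` initialization. A small pure-Python sketch of that slot-filling pattern, with hypothetical stand-ins for the activation list and `fit`:

```python
activations = ['linear', 'tanh', 'sigmoid']   # hypothetical activation list
pop_size = 5                                  # larger than the activation count

act_pop = [0] * pop_size                      # pre-allocated slots, as in the diff
weight_pop = [0] * pop_size

def fit_stub(act):                            # stand-in for plan_cuda.fit
    return f"W fitted under {act}"

for j in range(pop_size):                     # generation 0, fit_start=True
    if j < len(activations):
        act_pop[j] = activations[j]           # one activation per seeded genome
        weight_pop[j] = fit_stub(act_pop[j])
    # genomes with j >= len(activations) keep their define_genomes() values

print(act_pop)   # ['linear', 'tanh', 'sigmoid', 0, 0]
```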
@@ -374,15 +277,14 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                  best_acc = acc
                  best_weights = cp.copy(weight_pop[j])
                  final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                  best_model = model
 
                  final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations  # removing if all same
 
                  if batch_size == 1:
-                     postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 3)
-                 else:
-                     postfix_dict[f"{data} Batch Accuracy"] = cp.round(best_acc, 3)
-                 progress.set_postfix(postfix_dict)
+                     postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 4)
+                     progress.set_postfix(postfix_dict)
 
                  if show_current_activations:
                      print(f", Current Activations={final_activations}", end='')
@@ -393,12 +295,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                      train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
 
                  if batch_size == 1:
-                     postfix_dict[f"{data} Loss"] = cp.round(train_loss, 3)
+                     postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
                      best_loss = train_loss
-                 else:
-                     postfix_dict[f"{data} Batch Loss"] = cp.round(train_loss, 3)
-                     progress.set_postfix(postfix_dict)
-                     best_loss = train_loss
+                     progress.set_postfix(postfix_dict)
 
                  # Update visualizations during training
                  if show_history:
@@ -425,7 +324,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
              if target_acc is not None and best_acc >= target_acc:
                  progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                        activation_potentiation=final_activations, dtype=dtype)
+                 train_model = evaluate(x_train, y_train, W=best_weights,
+                                        activation_potentiation=final_activations)
 
                  if loss == 'categorical_crossentropy':
                      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -446,8 +345,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
              # Check target loss
              if target_loss is not None and best_loss <= target_loss:
                  progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                        activation_potentiation=final_activations, dtype=dtype)
+                 train_model = evaluate(x_train, y_train, W=best_weights,
+                                        activation_potentiation=final_activations)
 
                  if loss == 'categorical_crossentropy':
                      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -457,7 +356,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                        y_pred_batch=train_model[get_preds_softmax()])
 
                  print('\nActivations: ', final_activations)
-                 print(f'Train Accuracy:', train_model[get_acc()])
+                 print(f'Train Accuracy: ', train_model[get_acc()])
                  print(f'Train Loss: ', train_loss, '\n')
 
                  # Display final visualizations
@@ -470,6 +369,18 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          best_acc_per_gen_list.append(best_acc)
          loss_list.append(best_loss)
 
+         if batch_size != 1:
+             train_model = evaluate(x_train, y_train, best_weights, final_activations)
+
+             if loss == 'categorical_crossentropy':
+                 train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+             else:
+                 train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+
+             postfix_dict[f"{data} Accuracy"] = cp.round(train_model[get_acc()], 4)
+             postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
+             progress.set_postfix(postfix_dict)
+
          weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
          target_pop = []
@@ -477,8 +388,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          if early_stop == True and i > 0:
              if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                  progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                        activation_potentiation=final_activations, dtype=dtype, memory=memory)
+                 train_model = evaluate(x_train, y_train, W=best_weights,
+                                        activation_potentiation=final_activations)
 
                  if loss == 'categorical_crossentropy':
                      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -498,8 +409,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      # Final evaluation
      progress.close()
-     train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                            activation_potentiation=final_activations, dtype=dtype, memory=memory)
+     train_model = evaluate(x_train, y_train, W=best_weights,
+                            activation_potentiation=final_activations)
 
      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
@@ -507,69 +418,18 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
      print('\nActivations: ', final_activations)
-     print(f'Train Accuracy:', train_model[get_acc()])
+     print(f'Train Accuracy: ', train_model[get_acc()])
      print(f'Train Loss: ', train_loss, '\n')
 
      # Display final visualizations
      display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
      return best_weights, best_model[get_preds()], best_acc, final_activations
 
-
-
- def feed_forward(
-     Input,                   # list[num]: Input data.
-     w,                       # num: Weight matrix of the neural network.
-     is_training,             # bool: Flag indicating if the function is called during training (True or False).
-     activation_potentiation, # (list): Activation potentiation list for deep PLAN. (optional)
-     Class='?',               # int: Which class is, if training.
-     LTD=0
- ) -> tuple:
-     """
-     Applies feature extraction process to the input data using synaptic potentiation.
-
-     Args:
-         Input (num): Input data.
-         w (num): Weight matrix of the neural network.
-         is_training (bool): Flag indicating if the function is called during training (True or False).
-         Class (int): if is during training then which class(label) ? is isnt then put None.
-         # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-     Returns:
-         tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-         or
-         num: neural network output
-     """
-
-     Output = apply_activation(Input, activation_potentiation)
-
-     Input = Output
-
-     if is_training == True:
-
-         for _ in range(LTD):
-
-             depression_vector = cp.random.rand(*Input.shape)
-
-             Input -= depression_vector
-
-         w[Class, :] = Input
-         return w
-
-     else:
-
-         neural_layer = cp.dot(w, Input)
-
-         return neural_layer
-
  def evaluate(
      x_test,
      y_test,
      W,
-     activation_potentiation=['linear'],
-     loading_bar_status=True,
-     show_metrics=False,
-     dtype=cp.float32,
-     memory='gpu'
+     activation_potentiation=['linear']
  ) -> tuple:
      """
      Evaluates the neural network model using the given test data.
@@ -579,70 +439,16 @@ def evaluate(
 
      y_test (cp.ndarray): Test labels (one-hot encoded).
 
-     W (list[cp.ndarray]): Neural net weight matrix.
+     W (cp.ndarray): Neural net weight matrix.
 
      activation_potentiation (list): Activation list. Default = ['linear'].
 
-     loading_bar_status (bool): Loading bar (optional). Default = True.
-
-     show_metrics (bool): Visualize metrics ? (optional). Default = False.
-
-     dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
-     memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
-
      Returns:
          tuple: Model (list).
      """
 
-     if memory == 'gpu':
-         x_test = transfer_to_gpu(x_test, dtype=dtype)
-         y_test = transfer_to_gpu(y_test, dtype=y_test.dtype)
-
-     elif memory == 'cpu':
-         x_test = transfer_to_cpu(x_test, dtype=dtype)
-         y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)
-
-     else:
-         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
-
-     predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=dtype)
-     real_classes = cp.empty(len(x_test), dtype=y_test.dtype)
-     predict_classes = cp.empty(len(x_test), dtype=y_test.dtype)
-
-     true_predict = 0
-     acc_list = cp.empty(len(x_test), dtype=dtype)
-
-     if loading_bar_status:
-         loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-     for inpIndex in range(len(x_test)):
-         Input = transfer_to_gpu(x_test[inpIndex], dtype=dtype).ravel()
-         neural_layer = Input
-
-         neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-         predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-         RealOutput = decode_one_hot(transfer_to_gpu(y_test[inpIndex], dtype=y_test[inpIndex].dtype))
-         real_classes[inpIndex] = RealOutput
-         PredictedOutput = cp.argmax(neural_layer)
-         predict_classes[inpIndex] = PredictedOutput
-
-         if RealOutput == PredictedOutput:
-             true_predict += 1
-
-         acc = true_predict / (inpIndex + 1)
-         acc_list[inpIndex] = acc
-
-         if loading_bar_status:
-             loading_bar.update(1)
-             loading_bar.set_postfix({"Test Accuracy": acc})
-
-     if loading_bar_status:
-         loading_bar.close()
-
-     if show_metrics:
-         plot_evaluate(x_test, y_test, predict_classes, acc_list, W=cp.copy(W), activation_potentiation=activation_potentiation)
+     x_test = apply_activation(x_test, activation_potentiation)
+     result = x_test @ W.T
+     softmax_preds = cp.exp(result) / cp.sum(cp.exp(result), axis=1, keepdims=True); accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
 
-     return W, predict_classes, acc_list[-1], None, None, predict_probabilitys
+     return W, None, accuracy, None, None, softmax_preds
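
The rewritten `evaluate` replaces the per-sample loop with one matrix product plus a softmax. One caveat: `cp.exp(result)` without subtracting the row maximum can overflow for large logits. A numerically safer sketch of the same computation in plain CuPy; the names and toy data here are illustrative, not from the package:

```python
import cupy as cp

def evaluate_sketch(x_test, y_test, W):
    # x_test: (n_samples, n_features), y_test: one-hot, W: (n_classes, n_features)
    result = x_test @ W.T
    # Subtracting the row max leaves softmax unchanged but avoids exp() overflow.
    shifted = result - result.max(axis=1, keepdims=True)
    softmax_preds = cp.exp(shifted) / cp.sum(cp.exp(shifted), axis=1, keepdims=True)
    accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
    return accuracy, softmax_preds

x = cp.random.rand(8, 3).astype(cp.float32)
y = cp.eye(2, dtype=cp.float32)[cp.random.randint(0, 2, 8)]
W = cp.random.rand(2, 3).astype(cp.float32)
print(evaluate_sketch(x, y, W))
```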
pyerualjetwork/planeat.py CHANGED
@@ -17,10 +17,9 @@ import random
  import math
 
  ### LIBRARY IMPORTS ###
- from .plan import feed_forward
  from .data_operations import normalization
  from .ui import loading_bars, initialize_loading_bar
- from. activation_functions import apply_activation, all_activations
+ from .activation_functions import apply_activation, all_activations
 
  def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
      """
@@ -297,6 +296,7 @@ def evolver(weights,
      mutated_W = np.copy(bad_weights)
      mutated_act = bad_activations.copy()
 
+
      for i in range(len(bad_weights)):
 
          if policy == 'aggressive':
@@ -399,7 +399,7 @@ def evolver(weights,
      return weights, activation_potentiations
 
 
- def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=np.float32):
+ def evaluate(x_population, weights, activation_potentiations):
      """
      Evaluates the performance of a population of genomes, applying different activation functions
      and weights depending on whether reinforcement learning mode is enabled or not.
@@ -412,62 +412,30 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dty
      activation_potentiations (list or str): A list where each entry represents an activation function
                                              or a potentiation strategy applied to each genome. If only one
                                              activation function is used, this can be a single string.
-     rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
-                               Default is False.
-
-     dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
      Returns:
          list: A list of outputs corresponding to each genome in the population after applying the respective
                activation function and weights.
 
-     Notes:
-         - If `rl_mode` is True:
-             - Accepts x_population is a single genom
-             - The inputs are flattened, and the activation function is applied across the single genom.
-
-         - If `rl_mode` is False:
-             - Accepts x_population is a list of genomes
-             - Each genome is processed individually, and the results are stored in the `outputs` list.
-
-         - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
-
      Example:
          ```python
-         outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
+         outputs = evaluate(x_population, weights, activation_potentiations)
         ```
 
      - The function returns a list of outputs after processing the population, where each element corresponds to
        the output for each genome in `x_population`.
-     """
-
-     ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
-
-     ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
+     """
      ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-     if rl_mode == True:
-         Input = np.array(x_population, copy=False, dtype=dtype)
-         Input = Input.ravel()
-
-         if isinstance(activation_potentiations, str):
-             activation_potentiations = [activation_potentiations]
-
-         outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
-
-     else:
-         outputs = [0] * len(x_population)
-         for i, genome in enumerate(x_population):
-
-             Input = np.array(genome, copy=False)
-             Input = Input.ravel()
-
-             if isinstance(activation_potentiations[i], str):
-                 activation_potentiations[i] = [activation_potentiations[i]]
+     if isinstance(activation_potentiations, str):
+         activation_potentiations = [activation_potentiations]
+     else:
+         activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-             outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
+     x_population = apply_activation(x_population, activation_potentiations)
+     result = x_population @ weights.T
 
-     return outputs
+     return result
 
 
  def cross_over(first_parent_W,
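
As in plan_cuda's `evaluate`, the genome loop in planeat is replaced by one batched product. A small NumPy sketch of the per-genome pairing the removed loop computed (einsum is used here for clarity; it is an illustration of the vectorization, not the library's exact broadcasting):

```python
import numpy as np

pop, n_features, n_out = 4, 3, 2
x_population = np.random.rand(pop, n_features).astype(np.float32)    # one input row per genome
weights = np.random.rand(pop, n_out, n_features).astype(np.float32)  # one weight matrix per genome

# Loop form removed by this release: genome i's weights applied to genome i's input.
loop_outputs = np.stack([weights[i] @ x_population[i] for i in range(pop)])

# Equivalent batched form: pair index p of the inputs with index p of the weights.
batched_outputs = np.einsum('pf,pof->po', x_population, weights)

assert np.allclose(loop_outputs, batched_outputs)
```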