pyerualjetwork 4.3.0.1__py3-none-any.whl → 4.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {pyerualjetwork-jetstorm → pyerualjetwork}/__init__.py +1 -1
  2. pyerualjetwork/activation_functions.py +343 -0
  3. pyerualjetwork/activation_functions_cuda.py +340 -0
  4. {pyerualjetwork-jetstorm → pyerualjetwork}/model_operations.py +14 -14
  5. {pyerualjetwork-jetstorm → pyerualjetwork}/model_operations_cuda.py +17 -16
  6. {pyerualjetwork-jetstorm → pyerualjetwork}/plan.py +246 -44
  7. {pyerualjetwork-jetstorm → pyerualjetwork}/plan_cuda.py +256 -37
  8. {pyerualjetwork-jetstorm → pyerualjetwork}/planeat.py +43 -11
  9. {pyerualjetwork-jetstorm → pyerualjetwork}/planeat_cuda.py +44 -8
  10. {pyerualjetwork-4.3.0.1.dist-info → pyerualjetwork-4.3.1.dist-info}/METADATA +2 -3
  11. pyerualjetwork-4.3.1.dist-info/RECORD +24 -0
  12. pyerualjetwork-4.3.1.dist-info/top_level.txt +1 -0
  13. pyerualjetwork-4.3.0.1.dist-info/RECORD +0 -24
  14. pyerualjetwork-4.3.0.1.dist-info/top_level.txt +0 -1
  15. pyerualjetwork-jetstorm/activation_functions.py +0 -291
  16. pyerualjetwork-jetstorm/activation_functions_cuda.py +0 -290
  17. {pyerualjetwork-jetstorm → pyerualjetwork}/data_operations.py +0 -0
  18. {pyerualjetwork-jetstorm → pyerualjetwork}/data_operations_cuda.py +0 -0
  19. {pyerualjetwork-jetstorm → pyerualjetwork}/help.py +0 -0
  20. {pyerualjetwork-jetstorm → pyerualjetwork}/loss_functions.py +0 -0
  21. {pyerualjetwork-jetstorm → pyerualjetwork}/loss_functions_cuda.py +0 -0
  22. {pyerualjetwork-jetstorm → pyerualjetwork}/memory_operations.py +0 -0
  23. {pyerualjetwork-jetstorm → pyerualjetwork}/metrics.py +0 -0
  24. {pyerualjetwork-jetstorm → pyerualjetwork}/metrics_cuda.py +0 -0
  25. {pyerualjetwork-jetstorm → pyerualjetwork}/ui.py +0 -0
  26. {pyerualjetwork-jetstorm → pyerualjetwork}/visualizations.py +0 -0
  27. {pyerualjetwork-jetstorm → pyerualjetwork}/visualizations_cuda.py +0 -0
  28. {pyerualjetwork-4.3.0.1.dist-info → pyerualjetwork-4.3.1.dist-info}/WHEEL +0 -0
pyerualjetwork/model_operations.py

@@ -258,7 +258,7 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
        ndarray: Output from the model.
     """

-    from .activation_functions import apply_activation
+    from .plan import feed_forward
     from .data_operations import standard_scaler

     model = load_model(model_name, model_path)
@@ -269,12 +269,12 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):

     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)

-    Input = np.array(Input, dtype=dtype, copy=False)
-    Input = Input.ravel()
+    neural_layer = Input
+    neural_layer = np.array(neural_layer, copy=False)
+    neural_layer = neural_layer.ravel()

     try:
-        Input = apply_activation(Input, activation_potentiation)
-        neural_layer = Input @ W.T
+        neural_layer = feed_forward(neural_layer, np.copy(W.astype(dtype, copy=False)), is_training=False, Class='?', activation_potentiation=activation_potentiation)
         return neural_layer
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
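Note (editor's sketch, not part of the diff): the old inline path applied the activations and then computed `Input @ W.T`; the new `feed_forward` inference branch (defined later in this diff) applies the same activations and returns `np.dot(w, Input)`. For a 1-D input these are the same linear map, as a quick check with toy shapes and the identity ('linear') activation shows:

    import numpy as np

    W = np.random.rand(3, 5).astype(np.float32)   # (classes, features), toy values
    x = np.random.rand(5).astype(np.float32)      # flattened input

    old_output = x @ W.T        # pre-4.3.1 inline inference
    new_output = np.dot(W, x)   # what feed_forward(..., is_training=False) returns
    assert np.allclose(old_output, new_output)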
@@ -304,7 +304,7 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float3
     W = model[get_weights()]

     try:
-        Input = W.T @ output
+        Input = np.dot(output.astype(dtype, copy=False), np.copy(W.astype(dtype, copy=False)))
         return Input
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_ssd." + Style.RESET_ALL)
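Note (editor's sketch, not part of the diff): for a 1-D score vector, `W.T @ output` and `np.dot(output, W)` are the same linear map back to feature space, so this rewrite changes only dtype casting and defensive copying, not the result:

    import numpy as np

    W = np.random.rand(3, 5)    # (classes, features), toy values
    output = np.random.rand(3)  # class scores

    assert np.allclose(W.T @ output, np.dot(output, W))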
@@ -334,18 +334,18 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
        ndarray: Output from the model.
     """

-    from .data_operations import standard_scaler
-    from .activation_functions import apply_activation
+    from data_operations import standard_scaler
+    from plan import feed_forward

     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)

-    Input = np.array(Input, dtype=dtype, copy=False)
-    Input = Input.ravel()
-
     try:

-        Input = apply_activation(Input, activation_potentiation)
-        neural_layer = Input @ W.T
+        neural_layer = Input
+        neural_layer = np.array(neural_layer, copy=False)
+        neural_layer = neural_layer.ravel()
+
+        neural_layer = feed_forward(neural_layer, np.copy(W.astype(dtype, copy=False)), is_training=False, Class='?', activation_potentiation=activation_potentiation)

         return neural_layer

@@ -371,7 +371,7 @@ def reverse_predict_model_ram(output, W, dtype=np.float32):
     """

     try:
-        Input = W.T @ output
+        Input = np.dot(output.astype(dtype, copy=False), np.copy(W.astype(dtype, copy=False)))
         return Input

     except:
pyerualjetwork/model_operations_cuda.py

@@ -263,10 +263,10 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     Returns:
        ndarray: Output from the model.
     """
-
+
     Input = cp.array(Input, dtype=dtype, copy=False)
-
-    from .activation_functions_cuda import apply_activation
+
+    from .plan_cuda import feed_forward
     from .data_operations_cuda import standard_scaler

     model = load_model(model_name, model_path)
@@ -277,14 +277,13 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):

     Input = standard_scaler(None, Input, scaler_params)

-    Input = cp.array(Input, dtype=dtype, copy=False)
-    Input = Input.ravel()
+    neural_layer = Input
+    neural_layer = cp.array(neural_layer)
+    neural_layer = neural_layer.ravel()

     try:
-        Input = apply_activation(Input, activation_potentiation)
-        neural_layer = Input @ W.T
+        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
         return neural_layer
-
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
         sys.exit()
@@ -315,7 +314,7 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float3
     W = model[get_weights()]

     try:
-        Input = W.T @ output
+        Input = cp.dot(output, cp.copy(W))
         return Input
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_ssd." + Style.RESET_ALL)
@@ -345,17 +344,19 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
     """

     from .data_operations_cuda import standard_scaler
-    from .activation_functions_cuda import apply_activation
+    from .plan_cuda import feed_forward

+    Input = cp.array(Input, dtype=dtype, copy=False)
+
     Input = standard_scaler(None, Input, scaler_params)

-    Input = cp.array(Input, dtype=dtype, copy=False)
-    Input = Input.ravel()
-
     try:

-        Input = apply_activation(Input, activation_potentiation)
-        neural_layer = Input @ W.T
+        neural_layer = Input
+        neural_layer = cp.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+
+        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)

         return neural_layer

@@ -383,7 +384,7 @@ def reverse_predict_model_ram(output, W, dtype=cp.float32):
     output = cp.array(output, dtype=dtype, copy=False)

     try:
-        Input = W.T @ output
+        Input = cp.dot(output, cp.copy(W))
         return Input

     except:
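Note (editor's sketch, not part of the diff): the model_operations_cuda.py hunks above mirror the NumPy rewrite one-for-one with cupy, so the same identity holds on the GPU (requires a CUDA-capable device with cupy installed):

    import cupy as cp

    W = cp.random.rand(3, 5, dtype=cp.float32)
    output = cp.random.rand(3, dtype=cp.float32)
    assert cp.allclose(W.T @ output, cp.dot(output, cp.copy(W)))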
pyerualjetwork/plan.py

@@ -16,21 +16,31 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """

 import numpy as np
+import math

 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations import normalization, batcher
+from .data_operations import normalization, decode_one_hot, batcher
 from .loss_functions import binary_crossentropy, categorical_crossentropy
-from .activation_functions import apply_activation, all_activations
+from .activation_functions import apply_activation, Softmax, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
 from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
+    update_neural_web_for_fit,
+    plot_evaluate,
+    update_neuron_history,
+    initialize_visualization_for_fit,
+    update_weight_visualization_for_fit,
+    update_decision_boundary_for_fit,
+    update_validation_history_for_fit,
+    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner
+    update_neuron_history_for_learner,
+    show
 )

 ### GLOBAL VARIABLES ###
@@ -42,8 +52,18 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
+    val=False,
+    val_count=None,
     activation_potentiation=['linear'],
-    W=None,
+    x_val=None,
+    y_val=None,
+    show_training=None,
+    interval=100,
+    LTD=0,
+    decision_boundary_status=True,
+    train_bar=True,
+    auto_normalization=True,
+    neurons_history=False,
     dtype=np.float32
 ):
     """
@@ -51,35 +71,110 @@ def fit(

     fit Args:

-        x_train (aray-like[num]): List or numarray of input data.
+        x_train (list[num]): List or numarray of input data.

-        y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
+        y_train (list[num]): List or numarray of target labels. (one hot encoded)
+
+        val (None or True): validation in training process ? None or True default: None (optional)
+
+        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)

         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)

-        W (numpy.ndarray): If you want to re-continue or update model
+        x_val (list[num]): List of validation data. default: x_train (optional)
+
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
+
+        show_training (bool, str): True or None default: None (optional)
+
+        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
+
+        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
+
+        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
+
+        train_bar (bool): Training loading bar? True or False. Default is True. (optional)
+
+        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
+
+        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)

         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

     Returns:
-        numpyarray: (Weight matrix).
+        numpyarray([num]): (Weight matrix).
     """

-    # Pre-check
-
+    # Pre-checks
+
+    x_train = x_train.astype(dtype, copy=False)
+
+    if train_bar and val:
+        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
+    elif train_bar and val == False:
+        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
+
     if len(x_train) != len(y_train):
         raise ValueError("x_train and y_train must have the same length.")

-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
+    if val and (x_val is None and y_val is None):
+        x_val, y_val = x_train, y_train
+
+    elif val and (x_val is not None and y_val is not None):
+        x_val = x_val.astype(dtype, copy=False)
+        y_val = y_val.astype(dtype, copy=False)
+
+    val_list = [] if val else None
+    val_count = val_count or 10
+    # Defining weights
+    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TIME POTENTIATION WEIGHT
+    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TIME POTENTIATION WEIGHT
+    # Initialize visualization
+    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
+
+    # Training process
+    for index, inp in enumerate(x_train):
+        inp = np.array(inp, copy=False).ravel()
+        y_decoded = decode_one_hot(y_train[index])
+        # Weight updates
+        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
+        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
+        if val and index != 0:
+            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
+                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
+                val_list.append(val_acc)
+
+        # Visualization updates
+        if show_training:
+            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
+            if decision_boundary_status:
+                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
+            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
+        if neurons_history:
+            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
+        if train_bar:
+            train_progress.update(1)
+
+        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
+
+    # Finalize visualization
+    if show_training:
+        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
+        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
+        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
+        show()

-    x_train = apply_activation(x_train, activation_potentiation)
-    LTPW += np.array(y_train, dtype=optimize_labels(y_train, cuda=False).dtype).T @ x_train
+    if neurons_history:
+        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
+        show()

     return normalization(LTPW, dtype=dtype)


 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False,
+            neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
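Note (editor's sketch, not part of the diff): the restored training loop is per-sample cumulative learning: each input overwrites its class row in a fresh short-term matrix (STPW), which is accumulated into the long-term matrix (LTPW), with validation accuracy sampled every `math.ceil((val_count / len(x_train)) * 100)` steps. The core arithmetic, assuming LTD=0 and the 'linear' activation, with `norm` standing in for the library's `normalization`:

    import numpy as np

    def norm(w):
        return w / np.max(np.abs(w))  # stand-in normalizer (assumption)

    classes, features = 3, 5
    x = np.random.rand(10, features).astype(np.float32)
    y = np.eye(classes, dtype=np.float32)[np.random.randint(0, classes, 10)]

    LTPW = np.zeros((classes, features), dtype=np.float32)
    for inp, label in zip(x, y):
        STPW = np.ones((classes, features), dtype=np.float32)  # reset each sample
        STPW[np.argmax(label), :] = inp   # feed_forward(is_training=True): w[Class, :] = Input
        LTPW += norm(STPW)                # auto_normalization=True branch
    W = norm(LTPW)                        # fit returns normalization(LTPW)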
@@ -125,6 +220,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

         early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.

+        auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False

         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
@@ -217,10 +314,10 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

         if fit_start is True and i == 0:
             act_pop.append(activation_potentiation[j])
-            W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
+            W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
             weight_pop.append(W)

-        model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
+        model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
         acc = model[get_acc()]

         if strategy == 'accuracy': target_pop.append(acc)
@@ -254,7 +351,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             best_acc = acc
             best_weights = np.copy(weight_pop[j])
             final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
-
             best_model = model

             final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
@@ -305,7 +401,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target accuracy
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
+            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                    activation_potentiation=final_activations, dtype=dtype)

             if loss == 'categorical_crossentropy':
@@ -316,8 +412,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                    y_pred_batch=train_model[get_preds_softmax()])

             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy:', train_model[get_acc()])
-            print(f'Train Loss: ', train_loss, '\n')
+            print(f'Train Accuracy :', train_model[get_acc()])
+            print(f'Train Loss : ', train_loss, '\n')

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -327,7 +423,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target loss
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights,
+            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                    activation_potentiation=final_activations, dtype=dtype)

             if loss == 'categorical_crossentropy':
@@ -338,8 +434,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                    y_pred_batch=train_model[get_preds_softmax()])

             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy:', train_model[get_acc()])
-            print(f'Train Loss: ', train_loss, '\n')
+            print(f'Train Accuracy :', train_model[get_acc()])
+            print(f'Train Loss : ', train_loss, '\n')

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -358,7 +454,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights,
+                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                        activation_potentiation=final_activations, dtype=dtype)

                 if loss == 'categorical_crossentropy':
@@ -369,8 +465,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                                                        y_pred_batch=train_model[get_preds_softmax()])

                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy:', train_model[get_acc()])
-                print(f'Train Loss: ', train_loss, '\n')
+                print(f'Train Accuracy :', train_model[get_acc()])
+                print(f'Train Loss : ', train_loss, '\n')

                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -379,7 +475,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

     # Final evaluation
     progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights,
+    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                            activation_potentiation=final_activations, dtype=dtype)

     if loss == 'categorical_crossentropy':
@@ -388,38 +484,144 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy:', train_model[get_acc()])
-    print(f'Train Loss: ', train_loss, '\n')
+    print(f'Train Accuracy :', train_model[get_acc()])
+    print(f'Train Loss : ', train_loss, '\n')

     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations


-def evaluate(
-    x_test,
-    y_test,
-    W,
-    activation_potentiation=['linear']
+
+def feed_forward(
+    Input,               # list[num]: Input data.
+    w,                   # num: Weight matrix of the neural network.
+    is_training,         # bool: Flag indicating if the function is called during training (True or False).
+    activation_potentiation,
+    Class='?',           # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
+    LTD=0
 ) -> tuple:
     """
-    Evaluates the neural network model using the given test data.
+    Applies feature extraction process to the input data using synaptic potentiation.

     Args:
-        x_test (np.ndarray): Test data.
+        Input (num): Input data.
+        w (num): Weight matrix of the neural network.
+        is_training (bool): Flag indicating if the function is called during training (True or False).
+        Class (int): if is during training then which class(label) ? is isnt then put None.
+        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+
+    Returns:
+        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
+        or
+        num: neural network output
+    """
+
+    Output = apply_activation(Input, activation_potentiation)
+
+    Input = Output
+
+    if is_training == True:
+
+        for _ in range(LTD):
+
+            depression_vector = np.random.rand(*Input.shape)
+
+            Input -= depression_vector
+
+        w[Class, :] = Input
+        return w

-        y_test (np.ndarray): Test labels (one-hot encoded).
+    else:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
+
+
+def evaluate(
+    x_test,                              # NumPy array: Test input data.
+    y_test,                              # NumPy array: Test labels.
+    W,                                   # List of NumPy arrays: Neural network weight matrices.
+    activation_potentiation=['linear'],  # List of activation functions.
+    loading_bar_status=True,             # Optionally show loading bar.
+    show_metrics=None,                   # Optionally show metrics.
+    dtype=np.float32
+) -> tuple:
+    """
+    Evaluates the neural network model using the given test data.

-        W (np.ndarray): Neural net weight matrix.
+    Args:
+        x_test (np.ndarray): Test input data.
+
+        y_test (np.ndarray): Test labels. one-hot encoded.

-        activation_potentiation (list): Activation list. Default = ['linear'].
+        W (list[np.ndarray]): List of neural network weight matrices.

+        activation_potentiation (list): List of activation functions.
+
+        loading_bar_status (bool): Option to show a loading bar (optional).
+
+        show_metrics (bool): Option to show metrics (optional).
+
+        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+
     Returns:
-        tuple: Model (list).
+        tuple: Predicted labels, model accuracy, and other evaluation metrics.
     """
+    # Pre-checks
+
+    x_test = x_test.astype(dtype, copy=False)
+
+    if len(y_test[0]) < 256:
+        if y_test.dtype != np.uint8:
+            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
+    elif len(y_test[0]) <= 32767:
+        if y_test.dtype != np.uint16:
+            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
+    else:
+        if y_test.dtype != np.uint32:
+            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
+
+    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
+    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
+    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
+
+    true_predict = 0
+    acc_list = np.empty(len(x_test), dtype=dtype)
+
+    if loading_bar_status:
+        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
+
+    for inpIndex in range(len(x_test)):
+        Input = x_test[inpIndex].ravel()
+
+        neural_layer = Input
+
+        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+
+        predict_probabilitys[inpIndex] = Softmax(neural_layer)
+
+        RealOutput = np.argmax(y_test[inpIndex])
+        real_classes[inpIndex] = RealOutput
+        PredictedOutput = np.argmax(neural_layer)
+        predict_classes[inpIndex] = PredictedOutput
+
+        if RealOutput == PredictedOutput:
+            true_predict += 1
+
+        acc = true_predict / (inpIndex + 1)
+        acc_list[inpIndex] = acc
+
+        if loading_bar_status:
+            loading_bar.update(1)
+            loading_bar.set_postfix({"Test Accuracy": acc})
+
+    if loading_bar_status:
+        loading_bar.close()

-    x_test = apply_activation(x_test, activation_potentiation)
-    result = x_test @ W.T
-    softmax_preds = np.exp(result) / np.sum(np.exp(result), axis=1, keepdims=True); accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
+    if show_metrics:
+        # Plot the evaluation metrics
+        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=np.copy(W), activation_potentiation=activation_potentiation)

-    return W, None, accuracy, None, None, softmax_preds
+    return W, predict_classes, acc_list[-1], None, None, predict_probabilitys
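Note (editor's sketch, not part of the diff): `feed_forward` is now the single primitive behind both training and inference, and `evaluate` returns a six-element tuple indexed by the library's `get_preds()`/`get_acc()`/`get_preds_softmax()` helpers. How the two modes relate, assuming LTD=0 and the 'linear' activation:

    import numpy as np

    classes, features = 3, 5
    W = np.zeros((classes, features), dtype=np.float32)
    x = np.random.rand(features).astype(np.float32)

    # Training mode writes the activated input into the target class row:
    W[1, :] = x            # feed_forward(x, W, is_training=True, Class=1, ...) returns the updated W
    # Inference mode is a plain dot product over the potentiated weights:
    scores = np.dot(W, x)  # feed_forward(x, W, is_training=False, Class='?', ...)
    assert np.argmax(scores) == 1

    # evaluate(...) unpacks as:
    # W, predict_classes, accuracy, _, _, predict_probabilitys = evaluate(x_test, y_test, W=W)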