pyerualjetwork 4.3.1__py3-none-any.whl → 4.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {pyerualjetwork-4.3.1.dist-info → pyerualjetwork-4.3.2.0.dist-info}/METADATA +3 -2
  2. pyerualjetwork-4.3.2.0.dist-info/RECORD +24 -0
  3. pyerualjetwork-4.3.2.0.dist-info/top_level.txt +1 -0
  4. {pyerualjetwork → pyerualjetwork-afterburner}/__init__.py +1 -1
  5. pyerualjetwork-afterburner/activation_functions.py +291 -0
  6. pyerualjetwork-afterburner/activation_functions_cuda.py +290 -0
  7. {pyerualjetwork → pyerualjetwork-afterburner}/model_operations.py +14 -14
  8. {pyerualjetwork → pyerualjetwork-afterburner}/model_operations_cuda.py +16 -17
  9. {pyerualjetwork → pyerualjetwork-afterburner}/plan.py +44 -246
  10. {pyerualjetwork → pyerualjetwork-afterburner}/plan_cuda.py +37 -256
  11. {pyerualjetwork → pyerualjetwork-afterburner}/planeat.py +11 -43
  12. {pyerualjetwork → pyerualjetwork-afterburner}/planeat_cuda.py +8 -44
  13. pyerualjetwork/activation_functions.py +0 -343
  14. pyerualjetwork/activation_functions_cuda.py +0 -340
  15. pyerualjetwork-4.3.1.dist-info/RECORD +0 -24
  16. pyerualjetwork-4.3.1.dist-info/top_level.txt +0 -1
  17. {pyerualjetwork-4.3.1.dist-info → pyerualjetwork-4.3.2.0.dist-info}/WHEEL +0 -0
  18. {pyerualjetwork → pyerualjetwork-afterburner}/data_operations.py +0 -0
  19. {pyerualjetwork → pyerualjetwork-afterburner}/data_operations_cuda.py +0 -0
  20. {pyerualjetwork → pyerualjetwork-afterburner}/help.py +0 -0
  21. {pyerualjetwork → pyerualjetwork-afterburner}/loss_functions.py +0 -0
  22. {pyerualjetwork → pyerualjetwork-afterburner}/loss_functions_cuda.py +0 -0
  23. {pyerualjetwork → pyerualjetwork-afterburner}/memory_operations.py +0 -0
  24. {pyerualjetwork → pyerualjetwork-afterburner}/metrics.py +0 -0
  25. {pyerualjetwork → pyerualjetwork-afterburner}/metrics_cuda.py +0 -0
  26. {pyerualjetwork → pyerualjetwork-afterburner}/ui.py +0 -0
  27. {pyerualjetwork → pyerualjetwork-afterburner}/visualizations.py +0 -0
  28. {pyerualjetwork → pyerualjetwork-afterburner}/visualizations_cuda.py +0 -0
{pyerualjetwork → pyerualjetwork-afterburner}/model_operations_cuda.py
@@ -263,10 +263,10 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     Returns:
     ndarray: Output from the model.
     """
-
+
     Input = cp.array(Input, dtype=dtype, copy=False)
-
-    from .plan_cuda import feed_forward
+
+    from .activation_functions_cuda import apply_activation
     from .data_operations_cuda import standard_scaler

     model = load_model(model_name, model_path)
@@ -277,13 +277,14 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):

     Input = standard_scaler(None, Input, scaler_params)

-    neural_layer = Input
-    neural_layer = cp.array(neural_layer)
-    neural_layer = neural_layer.ravel()
+    Input = cp.array(Input, dtype=dtype, copy=False)
+    Input = Input.ravel()

     try:
-        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+        Input = apply_activation(Input, activation_potentiation)
+        neural_layer = Input @ W.T
         return neural_layer
+
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
         sys.exit()
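Net effect of the two predict_model_ssd hunks above: the per-sample plan_cuda.feed_forward call is gone, replaced by applying the activation stack directly and multiplying by the transposed weight matrix. A minimal NumPy sketch of the equivalent forward pass; the apply_activation stand-in below is hypothetical, not the library's implementation:

```python
import numpy as np

def apply_activation(x, activations):
    # Hypothetical stand-in for the library's activation dispatcher:
    # applies each named activation in sequence.
    table = {'linear': lambda v: v, 'relu': lambda v: np.maximum(0, v), 'tanh': np.tanh}
    for name in activations:
        x = table[name](x)
    return x

W = np.random.rand(3, 4).astype(np.float32)   # (classes, features) weight matrix
Input = np.random.rand(4).astype(np.float32)  # one flattened sample

Input = apply_activation(Input, ['tanh'])
neural_layer = Input @ W.T                    # (3,) class scores; same as np.dot(W, Input) for 1-D input
print(neural_layer)
```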
@@ -314,7 +315,7 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float32):
     W = model[get_weights()]

     try:
-        Input = cp.dot(output, cp.copy(W))
+        Input = W.T @ output
         return Input
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_ssd." + Style.RESET_ALL)
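For 1-D vectors the new `W.T @ output` is the same product as the old `cp.dot(output, cp.copy(W))`; the rewrite mainly drops the defensive copy of W. A quick NumPy check, assuming a (classes, features) weight matrix:

```python
import numpy as np

W = np.random.rand(3, 4).astype(np.float32)        # (classes, features)
output = np.array([0., 1., 0.], dtype=np.float32)  # e.g. a one-hot class activation

# Old form and new form agree for 1-D outputs.
assert np.allclose(np.dot(output, W.copy()), W.T @ output)
reconstructed_input = W.T @ output                 # (features,) approximate input pattern
print(reconstructed_input)
```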
@@ -344,19 +345,17 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'],
     """

     from .data_operations_cuda import standard_scaler
-    from .plan_cuda import feed_forward
+    from .activation_functions_cuda import apply_activation

-    Input = cp.array(Input, dtype=dtype, copy=False)
-
     Input = standard_scaler(None, Input, scaler_params)

+    Input = cp.array(Input, dtype=dtype, copy=False)
+    Input = Input.ravel()
+
     try:

-        neural_layer = Input
-        neural_layer = cp.array(neural_layer)
-        neural_layer = neural_layer.ravel()
-
-        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+        Input = apply_activation(Input, activation_potentiation)
+        neural_layer = Input @ W.T

         return neural_layer

@@ -384,7 +383,7 @@ def reverse_predict_model_ram(output, W, dtype=cp.float32):
     output = cp.array(output, dtype=dtype, copy=False)

     try:
-        Input = cp.dot(output, cp.copy(W))
+        Input = W.T @ output
         return Input

     except:
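The in-RAM variants mirror the ssd ones but take the weight matrix directly instead of loading a model from disk. A hypothetical usage sketch; the import path is assumed from the file list above, and the afterburner package rename may move it:

```python
import cupy as cp
# Assumed import path (pre-rename layout):
from pyerualjetwork.model_operations_cuda import predict_model_ram, reverse_predict_model_ram

W = cp.random.rand(3, 4).astype(cp.float32)  # hypothetical trained (classes, features) weights
x = cp.random.rand(4).astype(cp.float32)     # one flattened sample

scores = predict_model_ram(x, W, activation_potentiation=['linear'])
approx_input = reverse_predict_model_ram(scores, W)
```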
{pyerualjetwork → pyerualjetwork-afterburner}/plan.py
@@ -16,31 +16,21 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """

 import numpy as np
-import math

 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations import normalization, decode_one_hot, batcher
+from .data_operations import normalization, batcher
 from .loss_functions import binary_crossentropy, categorical_crossentropy
-from .activation_functions import apply_activation, Softmax, all_activations
+from .activation_functions import apply_activation, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
 from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner,
-    show
+    update_neuron_history_for_learner
 )

 ### GLOBAL VARIABLES ###
@@ -52,18 +42,8 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-    x_val=None,
-    y_val=None,
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
+    W=None,
     dtype=np.float32
 ):
     """
@@ -71,110 +51,35 @@ def fit(

     fit Args:

-        x_train (list[num]): List or numarray of input data.
+        x_train (aray-like[num]): List or numarray of input data.

-        y_train (list[num]): List or numarray of target labels. (one hot encoded)
-
-        val (None or True): validation in training process ? None or True default: None (optional)
-
-        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+        y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)

         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)

-        x_val (list[num]): List of validation data. default: x_train (optional)
-
-        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-        show_training (bool, str): True or None default: None (optional)
-
-        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
-
-        train_bar (bool): Training loading bar? True or False. Default is True. (optional)
-
-        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
-
-        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
+        W (numpy.ndarray): If you want to re-continue or update model

         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

     Returns:
-        numpyarray([num]): (Weight matrix).
+        numpyarray: (Weight matrix).
     """

-    # Pre-checks
-
-    x_train = x_train.astype(dtype, copy=False)
-
-    if train_bar and val:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
-    elif train_bar and val == False:
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
-
+    # Pre-check
+
     if len(x_train) != len(y_train):
         raise ValueError("x_train and y_train must have the same length.")

-    if val and (x_val is None and y_val is None):
-        x_val, y_val = x_train, y_train
-
-    elif val and (x_val is not None and y_val is not None):
-        x_val = x_val.astype(dtype, copy=False)
-        y_val = y_val.astype(dtype, copy=False)
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TIME POTENTIATION WEIGHT
-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TIME POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False).ravel()
-        y_decoded = decode_one_hot(y_train[index])
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
-                val_list.append(val_acc)
-
-        # Visualization updates
-        if show_training:
-            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-            if decision_boundary_status:
-                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-            update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-        if neurons_history:
-            update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    # Finalize visualization
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
+    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

-    if neurons_history:
-        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-        show()
+    x_train = apply_activation(x_train, activation_potentiation)
+    LTPW += np.array(y_train, dtype=optimize_labels(y_train, cuda=False).dtype).T @ x_train

     return normalization(LTPW, dtype=dtype)


 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
-            neural_web_history=False, show_current_activations=False, auto_normalization=True,
+            neural_web_history=False, show_current_activations=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
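The rewritten fit drops the whole per-sample STPW/LTPW loop, validation hooks, and live visualization: it applies the activation stack to the entire batch and accumulates y_train.T @ x_train in a single matrix product, optionally on top of a caller-supplied W. A self-contained sketch of that update rule, with an identity activation and a simple max-scaling standing in for apply_activation and normalization:

```python
import numpy as np

x_train = np.random.rand(6, 4).astype(np.float32)                # (samples, features)
y_train = np.eye(3, dtype=np.uint8)[np.random.randint(0, 3, 6)]  # one-hot (samples, classes)

LTPW = np.zeros((y_train.shape[1], x_train.shape[1]), dtype=np.float32)
LTPW += y_train.T @ x_train        # each class row accumulates the sum of its samples' features
W = LTPW / np.abs(LTPW).max()      # stand-in for data_operations.normalization
print(W.shape)                     # (3, 4)
```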
@@ -220,8 +125,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,

         early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.

-        auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
-
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False

         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
@@ -314,10 +217,10 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,

             if fit_start is True and i == 0:
                 act_pop.append(activation_potentiation[j])
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
                 weight_pop.append(W)

-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]

             if strategy == 'accuracy': target_pop.append(acc)
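With train_bar and auto_normalization gone from fit, and loading_bar_status and dtype gone from evaluate's signature, the calls inside learner shrink to the forms above. (Note that the multi-line evaluate calls further down still pass dtype=dtype on an unchanged continuation line, even though the new signature no longer declares it.) A hypothetical call pattern with the trimmed signatures; the import path is assumed and may differ after the afterburner rename:

```python
import numpy as np
from pyerualjetwork.plan import fit, evaluate, get_acc  # assumed pre-rename path

x_batch = np.random.rand(8, 4).astype(np.float32)
y_batch = np.eye(2, dtype=np.uint8)[np.random.randint(0, 2, 8)]

W = fit(x_batch, y_batch, activation_potentiation=['tanh'], dtype=np.float32)
model = evaluate(x_batch, y_batch, W=W, activation_potentiation=['tanh'])
print(model[get_acc()])
```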
@@ -351,6 +254,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
                 best_acc = acc
                 best_weights = np.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model

                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
@@ -401,7 +305,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
         # Check target accuracy
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+            train_model = evaluate(x_train, y_train, W=best_weights,
                                    activation_potentiation=final_activations, dtype=dtype)

             if loss == 'categorical_crossentropy':
@@ -412,8 +316,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
                                                   y_pred_batch=train_model[get_preds_softmax()])

             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy :', train_model[get_acc()])
-            print(f'Train Loss : ', train_loss, '\n')
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Train Loss: ', train_loss, '\n')

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -423,7 +327,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
         # Check target loss
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+            train_model = evaluate(x_train, y_train, W=best_weights,
                                    activation_potentiation=final_activations, dtype=dtype)

             if loss == 'categorical_crossentropy':
@@ -434,8 +338,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
                                                   y_pred_batch=train_model[get_preds_softmax()])

             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy :', train_model[get_acc()])
-            print(f'Train Loss : ', train_loss, '\n')
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Train Loss: ', train_loss, '\n')

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -454,7 +358,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+                train_model = evaluate(x_train, y_train, W=best_weights,
                                        activation_potentiation=final_activations, dtype=dtype)

                 if loss == 'categorical_crossentropy':
@@ -465,8 +369,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
                                                       y_pred_batch=train_model[get_preds_softmax()])

                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy :', train_model[get_acc()])
-                print(f'Train Loss : ', train_loss, '\n')
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Train Loss: ', train_loss, '\n')

                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
@@ -475,7 +379,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,

     # Final evaluation
     progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
+    train_model = evaluate(x_train, y_train, W=best_weights,
                            activation_potentiation=final_activations, dtype=dtype)

     if loss == 'categorical_crossentropy':
@@ -484,144 +388,38 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None,
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy :', train_model[get_acc()])
-    print(f'Train Loss : ', train_loss, '\n')
+    print(f'Train Accuracy:', train_model[get_acc()])
+    print(f'Train Loss: ', train_loss, '\n')

     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations


-
-def feed_forward(
-    Input,                   # list[num]: Input data.
-    w,                       # num: Weight matrix of the neural network.
-    is_training,             # bool: Flag indicating if the function is called during training (True or False).
-    activation_potentiation,
-    Class='?',               # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-    LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = np.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-
-
 def evaluate(
-    x_test,                              # NumPy array: Test input data.
-    y_test,                              # NumPy array: Test labels.
-    W,                                   # List of NumPy arrays: Neural network weight matrices.
-    activation_potentiation=['linear'],  # List of activation functions.
-    loading_bar_status=True,             # Optionally show loading bar.
-    show_metrics=None,                   # Optionally show metrics.
-    dtype=np.float32
+    x_test,
+    y_test,
+    W,
+    activation_potentiation=['linear']
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.

     Args:
-        x_test (np.ndarray): Test input data.
-
-        y_test (np.ndarray): Test labels. one-hot encoded.
-
-        W (list[np.ndarray]): List of neural network weight matrices.
-
-        activation_potentiation (list): List of activation functions.
-
-        loading_bar_status (bool): Option to show a loading bar (optional).
-
-        show_metrics (bool): Option to show metrics (optional).
+        x_test (np.ndarray): Test data.

-        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+        y_test (np.ndarray): Test labels (one-hot encoded).

+        W (np.ndarray): Neural net weight matrix.
+
+        activation_potentiation (list): Activation list. Default = ['linear'].
+
     Returns:
-        tuple: Predicted labels, model accuracy, and other evaluation metrics.
+        tuple: Model (list).
     """
-    # Pre-checks
-
-    x_test = x_test.astype(dtype, copy=False)
-
-    if len(y_test[0]) < 256:
-        if y_test.dtype != np.uint8:
-            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-    elif len(y_test[0]) <= 32767:
-        if y_test.dtype != np.uint16:
-            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_test.dtype != np.uint32:
-            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
-
-    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = np.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = x_test[inpIndex].ravel()
-
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = np.argmax(y_test[inpIndex])
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = np.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()

-    if show_metrics:
-        # Plot the evaluation metrics
-        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=np.copy(W), activation_potentiation=activation_potentiation)
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    softmax_preds = np.exp(result) / np.sum(np.exp(result), axis=1, keepdims=True); accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()

-    return W, predict_classes, acc_list[-1], None, None, predict_probabilitys
+    return W, None, accuracy, None, None, softmax_preds
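The rewritten evaluate collapses the per-sample loop, loading bar, and metric plotting into three vectorized lines. One caveat worth flagging: the inline np.exp(result) can overflow for large scores; the conventional max-shifted softmax is numerically equivalent and safe. A sketch under that assumption, not the package's code:

```python
import numpy as np

def evaluate_vectorized(x_test, y_test, W):
    # One matmul scores every test sample at once: (n, features) @ (features, classes).
    result = x_test @ W.T
    # Max-shifted softmax: identical probabilities, but np.exp cannot overflow.
    shifted = result - result.max(axis=1, keepdims=True)
    softmax_preds = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
    return W, None, accuracy, None, None, softmax_preds

x = np.random.rand(5, 4).astype(np.float32)
y = np.eye(3, dtype=np.uint8)[np.random.randint(0, 3, 5)]
W = np.random.rand(3, 4).astype(np.float32)
print(evaluate_vectorized(x, y, W)[2])  # accuracy
```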