pyerualjetwork 5.37__py3-none-any.whl → 5.40a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,9 +2,9 @@
 """
 
-NN (Neural Networks) on CPU
+NN (Neural Networks)
 ============================
-This module hosts functions for training and evaluating artificial neural networks on CPU for labeled classification tasks.
+This module hosts functions for training and evaluating artificial neural networks for labeled classification tasks.
 
 Currently, 3 types of models can be trained:
 
@@ -48,24 +48,16 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import numpy as np
+import cupy as cp
 import copy
 import random
 
 ### LIBRARY IMPORTS ###
-from ..ui import loading_bars, initialize_loading_bar
-from .data_ops import normalization, batcher
-from .activation_functions import apply_activation, all_activations
-from .model_ops import get_acc, get_preds_softmax
-from ..memory_ops import optimize_labels
-from .loss_functions import categorical_crossentropy, binary_crossentropy
-from ..fitness_functions import wals
-from .visualizations import (
-    draw_neural_web,
-    display_visualizations_for_learner,
-    update_history_plots_for_learner,
-    initialize_visualization_for_learner,
-    update_neuron_history_for_learner
-)
+from .ui import loading_bars, initialize_loading_bar
+from .cpu.activation_functions import all_activations
+from .model_ops import get_acc, get_preds_softmax, get_preds
+from .memory_ops import optimize_labels, transfer_to_gpu
+from .fitness_functions import wals
 
 ### GLOBAL VARIABLES ###
 bar_format_normal = loading_bars()[0]
@@ -79,48 +71,62 @@ def plan_fit(
         activations=['linear'],
         W=None,
         auto_normalization=False,
+        cuda=False,
         dtype=np.float32
         ):
     """
     Creates a PLAN model by fitting the data.
 
     Args:
-        x_train (array-like[numpy]): List or numpy array of input data.
+        x_train (array-like[numpy or cupy]): List or numpy/cupy array of input data.
 
-        y_train (array-like[numpy]): List or numpy array of target labels (one-hot encoded).
+        y_train (array-like[numpy or cupy]): List or numpy/cupy array of target labels (one-hot encoded).
 
         activations (list): For deeper PLAN networks, activation function parameters. For more information please run this code: neu.activations_list() default: [None] (optional)
 
-        W (numpy.ndarray): If you want to resume or update a previously trained model.
+        W (numpy.ndarray or cupy.ndarray): If you want to resume or update a previously trained model.
 
         auto_normalization (bool, optional): Normalization may solve overflow problems. Default: False
 
+        cuda (bool, optional): CUDA GPU acceleration? Default: False.
+
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
 
     Returns:
         numpy.ndarray: Weight matrix.
     """
 
+    if not cuda:
+        from .cpu.data_ops import normalization
+        from .cpu.activation_functions import apply_activation
+        array_type = np
+
+    else:
+        from .cuda.data_ops import normalization
+        from .cuda.activation_functions import apply_activation
+        array_type = cp
+
+
     # Pre-check
 
     if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
 
-    weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
+    weight = array_type.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
     if auto_normalization is True: x_train = normalization(apply_activation(x_train, activations))
     elif auto_normalization is False: x_train = apply_activation(x_train, activations)
     else: raise ValueError('normalization parameter can only be True or False')
-
-    weight += y_train.T @ x_train
+
+    weight += y_train.T @ x_train if not cuda else cp.array(y_train).T @ cp.array(x_train)
 
     return normalization(weight, dtype=dtype)
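The change above threads the new `cuda` flag through `plan_fit`: the array backend (`np` or `cp`) is picked at call time, and the fit itself remains the single PLAN accumulation `weight += y_train.T @ x_train`. A minimal sketch of a call, assuming the flattened 5.40a0 layout this diff shows (`plan_fit` living in `pyerualjetwork.nn`):

```python
import numpy as np
from pyerualjetwork import nn  # assumed layout per this diff

# Toy data: 4 samples, 3 features, 2 classes (one-hot labels).
x_train = np.array([[0.1, 0.2, 0.7],
                    [0.8, 0.1, 0.1],
                    [0.2, 0.3, 0.5],
                    [0.9, 0.1, 0.0]], dtype=np.float32)
y_train = np.array([[1, 0], [0, 1], [1, 0], [0, 1]], dtype=np.float32)

# cuda=False keeps everything in NumPy; cuda=True runs the same accumulation
# on CuPy arrays instead (requires a CUDA-capable GPU and CuPy).
W = nn.plan_fit(x_train, y_train, activations=['linear'], cuda=False)
print(W.shape)  # (2, 3): one weight row per class, one column per feature
```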
 
 
-def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size=1,
+def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=True, batch_size=1,
           weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
           neurons_history=False, early_stop=False, show_history=False, target_loss=None,
           interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
-          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], dtype=np.float32):
+          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], decision_boundary_history=False, cuda=False, dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation (or activation function) & weight values for the labeled classification dataset.
@@ -135,18 +141,18 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
     Examples:
 
         This creates a PLAN model:
-            - ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=True)```
+            - ```model = learn(x_train, y_train, optimizer, template_model, pop_size=100, gen=100, fit_start=True)```
 
         This creates an MLP model (with 2 hidden layers):
-            - ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=False, neurons=[64, 64], activation_functions=['tanh', 'tanh'])```
+            - ```model = learn(x_train, y_train, optimizer, template_model, pop_size=100, gen=100, fit_start=False, neurons=[64, 64], activation_functions=['tanh', 'tanh'])```
 
         This creates a PTNN model (with 2 hidden layers & 1 aggregation layer (comes with PLAN)):
-            - ```learn(x_train, y_train, optimizer, pop_size=100, gen=[10, 100], fit_start=True, neurons=[64, 64], activation_functions=['tanh', 'tanh'])```
+            - ```model = learn(x_train, y_train, optimizer, template_model, pop_size=100, gen=[10, 100], fit_start=True, neurons=[64, 64], activation_functions=['tanh', 'tanh'])```
 
     :Args:
         :param x_train: (array-like): Training input data.
         :param y_train: (array-like): Labels for training data, one-hot encoded.
-        :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN all use ENE for optimization. Gradient-based techniques will be added in the future.) Please use this: from pyerualjetwork.cpu.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters, for example: activation_add_prob=0.85', **kwargs) Example:
+        :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN all use ENE for optimization. Gradient-based techniques will be added in the future.) Please use this: from pyerualjetwork.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters, for example: activation_add_prob=0.85', **kwargs) Example:
         ```python
         optimizer = lambda *args, **kwargs: ene.evolver(*args,
                                                         activation_add_prob=0.05,
@@ -163,6 +169,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                                                         batch_size=0.05,
                                                         interval=16.67)
         ```
+        :param template_model: (tuple): Use the get_model_template() function in the model_ops module; see the usage sketch after this docstring.
         :param fit_start: (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Additionally, if you want to train a PTNN model you must give True. Options: True or False. Default: True
         :param gen: (int or list): The generation count for genetic optimization. If you want to train a PTNN model you must give a list of two numbers: the first for PLAN model training, the second for MLP.
         :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1 (100% of the train set).
@@ -183,29 +190,56 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
         :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
         :param start_this_W: (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
         :param neurons: (list[int], optional): If you don't want to train a PLAN model, this parameter represents the neuron count of each hidden layer for MLP or PTNN. Number of elements --> Layer count. Default: [] (no hidden layer) --> architecture set to PLAN; otherwise --> architecture set to MLP.
+        :param decision_boundary_history: (bool, optional): At the end of the training, the decision boundary history is shown in animation form. Note: If you are sure of your memory, set it to True. Default: False
         :param activation_functions: (list[str], optional): If you don't want to train a PLAN model, this parameter represents the activation function of each hidden layer for MLP or PTNN. If neurons is not [] --> uses default: ['linear'] * len(neurons). If neurons is [] --> uses [].
         :param dtype: (numpy.dtype): Data type for the weight matrices. np.float32 by default. Example: np.float64 or np.float16.
+        :param cuda: (bool, optional): CUDA GPU acceleration? Default: False.
 
     Returns:
         tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activation functions]].
     """
 
     from .ene import define_genomes
-
+    from .cpu.visualizations import display_decision_boundary_history, create_decision_boundary_hist, plot_decision_boundary
+
+    if cuda is False:
+        from .cpu.data_ops import batcher
+        from .cpu.loss_functions import categorical_crossentropy, binary_crossentropy
+        from .cpu.visualizations import (
+            draw_neural_web,
+            display_visualizations_for_learner,
+            update_history_plots_for_learner,
+            initialize_visualization_for_learner,
+            update_neuron_history_for_learner)
+
+    else:
+        from .cuda.data_ops import batcher
+        from .cuda.loss_functions import categorical_crossentropy, binary_crossentropy
+        from .cuda.visualizations import (
+            draw_neural_web,
+            display_visualizations_for_learner,
+            update_history_plots_for_learner,
+            initialize_visualization_for_learner,
+            update_neuron_history_for_learner)
+
     data = 'Train'
 
     except_this = ['spiral', 'circular']
     activations = [item for item in all_activations() if item not in except_this]
     activations_len = len(activations)
-
+
     # Pre-checks
 
+    if cuda:
+        x_train = transfer_to_gpu(x_train, dtype=dtype)
+        y_train = transfer_to_gpu(y_train, dtype=dtype)
+
     if pop_size > activations_len and fit_start is True:
         for _ in range(pop_size - len(activations)):
             random_index_all_act = random.randint(0, len(activations)-1)
             activations.append(activations[random_index_all_act])
 
-    y_train = optimize_labels(y_train, cuda=False)
+    y_train = optimize_labels(y_train, cuda=cuda)
 
     if pop_size < activations_len: raise ValueError(f"pop_size must be higher or equal to {activations_len}")
 
@@ -328,15 +362,15 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                 else:
 
                     act_pop[j] = activations[j]
-                    W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+                    W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
                     weight_pop[j] = W
 
 
             if weight_evolve is False:
-                weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
 
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, model_type=model_type)
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
             acc = model[get_acc()]
 
             if loss == 'categorical_crossentropy':
@@ -347,8 +381,8 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                                                       y_pred_batch=model[get_preds_softmax()])
 
             fitness = wals(acc, train_loss, acc_impact, loss_impact)
-            target_pop.append(fitness)
-
+            target_pop.append(fitness if not cuda else fitness.get())
+
             if fitness >= best_fitness:
 
                 best_fitness = fitness
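The new `.get()` branch above is the CuPy-to-host hand-off: with `cuda=True`, `wals` returns a 0-d array resident on the GPU, and `.get()` copies it back so `target_pop` stays a host-side list the evolver can consume. An illustrative snippet (assumes CuPy is installed):

```python
import cupy as cp

fitness = cp.asarray(0.93, dtype=cp.float32)  # 0-d array living on the GPU
target_pop = []
target_pop.append(fitness.get())  # .get() copies the value into host memory
print(type(target_pop[0]))        # numpy.ndarray (0-d, host-side)
```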
@@ -399,7 +433,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
             if target_acc is not None and best_acc >= target_acc:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weight,
-                                       activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)
+                                       activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                           y_pred_batch=train_model[get_preds_softmax()])
@@ -413,15 +447,32 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                 print('Train Loss: ', train_loss, '\n')
                 print('Model Type:', model_type)
 
+                if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                                    best_loss, y_train, interval)
-                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, model_type, None, None, None, None, activation_potentiation
+                model = template_model(
+                    best_weight,
+                    best_model[get_preds()],
+                    best_acc,
+                    final_activations,
+                    None,
+                    best_model[get_preds_softmax()],
+                    model_type,
+                    None,
+                    None,
+                    None,
+                    None,
+                    activation_potentiation
+                )
+
+                return model
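Every early-return path now builds the result through `template_model(...)` with the same 12 positional slots that the old bare-tuple return used. The authoritative template lives in `model_ops.get_model_template()`, which this diff does not show; the reconstruction below is hypothetical, with field names guessed from the `load_model` docstring and the `.accuracy`/`._replace` call sites in `evaluate` further down:

```python
from collections import namedtuple

# Hypothetical: field names inferred, not taken from model_ops.
Model = namedtuple('Model', [
    'weights', 'predictions', 'accuracy', 'activations', 'scaler_params',
    'softmax_predictions', 'model_type', 'weight_type', 'weight_format',
    'device_version', 'model_df', 'activation_potentiation'])

m = Model(None, None, 0.97, ['tanh'], None, None, 'PLAN',
          None, None, None, None, [])
print(m.accuracy)                          # named access instead of m[2]
print(m._replace(accuracy=0.98).accuracy)  # immutable-style update, as evaluate() does
```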
 
             # Check target loss
             if target_loss is not None and best_loss <= target_loss:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weight,
-                                       activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)
+                                       activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -436,16 +487,39 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                 print('Train Loss: ', train_loss, '\n')
                 print('Model Type:', model_type)
 
+                if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                                    train_loss, y_train, interval)
-                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, model_type, None, None, None, None, activation_potentiation
+                model = template_model(
+                    best_weight,
+                    best_model[get_preds()],
+                    best_acc,
+                    final_activations,
+                    None,
+                    best_model[get_preds_softmax()],
+                    model_type,
+                    None,
+                    None,
+                    None,
+                    None,
+                    activation_potentiation
+                )
+
+                return model
 
 
         progress.update(1)
 
+        if decision_boundary_history:
+            if i == 0:
+                fig, ax = create_decision_boundary_hist()
+                artist = []
+            artist = plot_decision_boundary(x_train_batch, y_train_batch, activations=act_pop[np.argmax(target_pop)], W=weight_pop[np.argmax(target_pop)], cuda=cuda, model_type=model_type, ax=ax, artist=artist)
+
         if batch_size != 1:
-            train_model = evaluate(x_train, y_train, W=best_weight, activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)
+            train_model = evaluate(x_train, y_train, W=best_weight, activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -476,7 +550,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weight,
-                                       activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)
+                                       activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -491,16 +565,33 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                 print('Train Loss: ', train_loss, '\n')
                 print('Model Type:', model_type)
 
+                if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                                    train_loss, y_train, interval)
-                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, model_type, None, None, None, None, activation_potentiation
+                model = template_model(
+                    best_weight,
+                    best_model[get_preds()],
+                    best_acc,
+                    final_activations,
+                    None,
+                    best_model[get_preds_softmax()],
+                    model_type,
+                    None,
+                    None,
+                    None,
+                    None,
+                    activation_potentiation
+                )
+
+                return model
 
     # Final evaluation
     progress.close()
 
     train_model = evaluate(x_train, y_train, W=best_weight,
-                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)
+                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -509,25 +600,47 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
         train_loss = binary_crossentropy(y_true_batch=y_train,
                                          y_pred_batch=train_model[get_preds_softmax()])
 
+
     print('\nActivations: ', final_activations)
     print('Activation Potentiation: ', activation_potentiation)
     print('Train Accuracy:', train_model[get_acc()])
     print('Train Loss: ', train_loss, '\n')
     print('Model Type:', model_type)
 
+    if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, train_loss, y_train, interval)
-    return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, model_type, None, None, None, None, activation_potentiation
+
+    model = template_model(
+        best_weight,
+        best_model[get_preds()],
+        best_acc,
+        final_activations,
+        None,
+        best_model[get_preds_softmax()],
+        model_type,
+        None,
+        None,
+        None,
+        None,
+        activation_potentiation
+    )
+
+    return model
 
 
 def evaluate(
     x_test,
     y_test,
-    model_type,
-    W,
+    model_type=None,
+    W=None,
+    model=None,
     activations=['linear'],
     activation_potentiations=[],
-    auto_normalization=False
+    auto_normalization=False,
+    show_report=False,
+    cuda=False
     ) -> tuple:
     """
     Evaluates the neural network model using the given test data.
@@ -537,61 +650,73 @@ def evaluate(
 
         y_test (np.ndarray): Test labels (one-hot encoded).
 
-        model_type (str): Type of the model. Options: 'PLAN', 'MLP', 'PTNN'.
+        model_type (str, optional): Type of the model. Options: 'PLAN', 'MLP', 'PTNN'.
 
-        W (np.ndarray): Neural net weight matrix.
+        W (array-like, optional): Neural net weight matrix.
 
+        model (tuple, optional): Trained model.
+
         activations (list, optional): Activation list for PLAN or MLP models (MLP layer activations if it is a PTNN model). Default = ['linear'].
 
         activation_potentiations (list, optional): Extra activation potentiation list (PLAN layer activations) for PTNN models. Default = [].
 
         auto_normalization (bool, optional): Normalization for x_test? Default = False.
 
+        show_report (bool, optional): Show test report? Default = False.
+
+        cuda (bool, optional): CUDA GPU acceleration? Default = False.
+
     Returns:
         tuple: Model (list).
     """
 
-    if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
+    from .cpu.visualizations import plot_evaluate
+    from .model_ops import predict_from_memory
 
-    if isinstance(activations, str):
-        activations = [activations]
-    elif isinstance(activations, list):
-        activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
-
-    if model_type == 'MLP':
-        layer = x_test
-        for i in range(len(W)):
-            if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
-
-            layer = layer @ W[i].T
-
-        result = layer
-
-    if model_type == 'PLAN':
+    if not cuda:
+        from .cpu.data_ops import normalization
+
+        if model:
+            sample_acc = model.accuracy
+            if hasattr(sample_acc, "get"): model = model._replace(accuracy=sample_acc.get())
 
-        x_test = apply_activation(x_test, activations)
-        result = x_test @ W.T
+    else:
+        from .cuda.data_ops import normalization
+        x_test = cp.array(x_test)
+        y_test = cp.array(y_test)
+
+        if model:
+            sample_acc = model.accuracy
+            if isinstance(sample_acc, np.number): model = model._replace(accuracy=cp.array(sample_acc))
+
+    if model is None:
+        from .model_ops import get_model_template
+        template_model = get_model_template()
+        model = template_model(
+            W,
+            None,
+            None,
+            activations,
+            None,
+            None,
+            model_type,
+            None,
+            None,
+            None,
+            None,
+            activation_potentiations)
 
-    if model_type == 'PTNN':
+    result = predict_from_memory(x_test, model, cuda=cuda)
 
-        if isinstance(activation_potentiations, str):
-            activation_potentiations = [activation_potentiations]
-        elif isinstance(activation_potentiations, list):
-            activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
-
-        x_test = apply_activation(x_test, activation_potentiations)
-        layer = x_test @ W[0].T
+    if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
 
-        for i in range(1, len(W)):
-            if i != len(W) - 1: layer = apply_activation(layer, activations[i])
+    array_type = cp if cuda else np
 
-            layer = layer @ W[i].T
+    max_vals = array_type.max(result, axis=1, keepdims=True)
+
+    softmax_preds = array_type.exp(result - max_vals) / array_type.sum(array_type.exp(result - max_vals), axis=1, keepdims=True)
+    accuracy = (array_type.argmax(softmax_preds, axis=1) == array_type.argmax(y_test, axis=1)).mean()
 
-        result = layer
+    if show_report: plot_evaluate(x_test, y_test, result, model=model, cuda=cuda)
 
-    max_vals = np.max(result, axis=1, keepdims=True)
-
-    softmax_preds = np.exp(result - max_vals) / np.sum(np.exp(result - max_vals), axis=1, keepdims=True)
-    accuracy = (np.argmax(softmax_preds, axis=1) == np.argmax(y_test, axis=1)).mean()
-
     return W, result, accuracy, None, None, softmax_preds
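`evaluate` therefore accepts either the raw pieces (`W`, `model_type`, `activations`) or a whole trained record via the new `model` parameter, and still returns the same 6-tuple. A hedged sketch of both call styles, assuming the layout in this diff:

```python
import numpy as np
from pyerualjetwork import nn  # assumed layout per this diff

x_test = np.random.rand(8, 3).astype(np.float32)
y_test = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 8)]
W = nn.plan_fit(x_test, y_test, activations=['linear'])  # stand-in weights

# Raw-argument form: evaluate() builds a model record internally from the template.
_, preds, acc, _, _, softmax_preds = nn.evaluate(
    x_test, y_test, model_type='PLAN', W=W, activations=['linear'])
print(float(acc))

# Record form: pass the return value of learn() directly and let evaluate() unpack it.
# results = nn.evaluate(x_test, y_test, model=model, show_report=True, cuda=False)
```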
@@ -327,7 +327,7 @@ def load_model(model_name,
         lists: Weights, None, test_accuracy, activations, scaler_params, None, model_type, weight_type, weight_format, device_version, (list[df_elements])=Pandas DataFrame of the model
     """
 
-    from .. import __version__
+    from . import __version__
 
     try:
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.37
+Version: 5.40a0
 Summary: PyerualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -28,8 +28,9 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
 'pip install pyerualjetwork'
 
 package modules:
-'from pyerualjetwork.cpu import nn, ene, data_ops, model_ops, memory_ops'
-'from pyerualjetwork.cuda import nn, ene, data_ops, model_ops, memory_ops'
+'from pyerualjetwork import nn, ene, model_ops, memory_ops'
+'from pyerualjetwork.cpu import data_ops'
+'from pyerualjetwork.cuda import data_ops'
 
 please read docstrings.
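Spelled out, the import layout those metadata lines describe:

```python
# Core modules now sit at the package root; data_ops stays split per backend.
from pyerualjetwork import nn, ene, model_ops, memory_ops
from pyerualjetwork.cpu import data_ops       # CPU backend
# from pyerualjetwork.cuda import data_ops    # GPU backend (requires CuPy)
```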
 
@@ -94,13 +95,13 @@ You can create artificial intelligence models that perform natural language proc
 
 ![PLAN VISION](https://github.com/HCB06/PyerualJetwork/blob/main/Media/NLP.gif)
 
-PLANEAT:<br>
+ENE:<br>
 
 You can create artificial intelligence models that perform reinforcement learning tasks and genetic optimization tasks using the ene module:
 
-![PLANEAT](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PLANEAT_1.gif)<br>
-![PLANEAT](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PLANEAT_2.gif)<br>
-![PLANEAT](https://github.com/HCB06/PyerualJetwork/blob/main/Media/mario.gif)<br><br>
+![ENE](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PLANEAT_1.gif)<br>
+![ENE](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PLANEAT_2.gif)<br>
+![ENE](https://github.com/HCB06/PyerualJetwork/blob/main/Media/mario.gif)<br><br>
 
 YOU CAN CREATE DYNAMIC ANIMATIONS OF YOUR MODELS
 
@@ -0,0 +1,27 @@
+pyerualjetwork/__init__.py,sha256=rfo3H-CvlOk1oZmfsHKvg3DXIz6xUvH74WzM2_F9wmE,2798
+pyerualjetwork/ene.py,sha256=8g2XEPRm3NLqSaN7xihIj1xXdIjSrl8Q69zqWTIXPI4,42142
+pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
+pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
+pyerualjetwork/issue_solver.py,sha256=W7DIetvWD535bjJ4JueTUmWRFceqr5B_U1tFjv0S7f0,4100
+pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
+pyerualjetwork/model_ops.py,sha256=sGPvgWFwLI5dVV4cRVqeA7Zc0NTiz8F-3v55UbiV9_k,22547
+pyerualjetwork/nn.py,sha256=GXTAoFjPxxiS2r0rMcr5DEb7t2aJXTYW8KXvDYaoltY,36277
+pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
+pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
+pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
+pyerualjetwork/cpu/__init__.py,sha256=9apRbPqvGKLJwyI3Md6R5a478YbZ7kIq0dRRa_lqgrY,789
+pyerualjetwork/cpu/activation_functions.py,sha256=zZSoOQ452Ykp_RsHVxklxesJmmFgufyIB4F3WQjudEQ,6689
+pyerualjetwork/cpu/data_ops.py,sha256=SPsIcjU0JPHfsnEmGjD8q-yTlpgYk-KPOPJ44dfp-nU,16143
+pyerualjetwork/cpu/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/cpu/metrics.py,sha256=NF8FARAqtuGlf4omVkQT4pOQZy7uePqzuHZGX9Y_Pn4,6076
+pyerualjetwork/cpu/visualizations.py,sha256=vaXhRlmxmr-R75UhgaoRgaYiMJPZ8prU229l3z3RF7I,27146
+pyerualjetwork/cuda/__init__.py,sha256=Kja6OmNaJ0giOhRNYw9hgGkh5N4F1EUS2v94E_rmp2k,839
+pyerualjetwork/cuda/activation_functions.py,sha256=Gj-qalU0GoAWoZzbFFHsD-R0c0KzHwOK1wwUQneBE44,6872
+pyerualjetwork/cuda/data_ops.py,sha256=k7NX-ckZ6-NwvioigACUHrekG7L5lO4bzTtQbBwH1Fc,18508
+pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
+pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
+pyerualjetwork-5.40a0.dist-info/METADATA,sha256=_SsE18Nu7P9uRMOU8L3coDcJA-ltYDexmb0z_ugZuR8,7990
+pyerualjetwork-5.40a0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.40a0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.40a0.dist-info/RECORD,,