pyerualjetwork 5.54a4__py3-none-any.whl → 5.55__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """
 
-__version__ = "5.54a4"
+__version__ = "5.55"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/nn.py CHANGED
@@ -23,7 +23,7 @@ Currently, 3 types of models can be trained:
 PTNN (Potentiation Transfer Neural Network) -- With non-bias
 * Training Time for Small Projects: fast
 * Training Time for Big Projects: fast
-* Explainability: low
+* Explainability: medium
 * Learning Capacity: high
 
 Read learn function docstring for know how to use of these model architectures.
@@ -100,7 +100,7 @@ def plan_fit(
 
 from .cpu.data_ops import normalization
 if not cuda:
-    from cpu.activation_functions import apply_activation
+    from .cpu.activation_functions import apply_activation
     array_type = np
 
 else:
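
Note on the hunk above: `cpu` is a subpackage of `pyerualjetwork`, not a top-level module, so the bare import only resolves when the interpreter happens to run from inside the package's source tree and raises ModuleNotFoundError from an installed wheel. The leading dot makes the import relative to the containing package. A minimal sketch of the difference, using an illustrative layout rather than the library's full tree:

    # Illustrative layout (abridged):
    #   pyerualjetwork/
    #       __init__.py
    #       nn.py
    #       cpu/
    #           __init__.py
    #           activation_functions.py

    # Inside pyerualjetwork/nn.py:
    from cpu.activation_functions import apply_activation    # absolute: needs a top-level 'cpu' module, fails once installed
    from .cpu.activation_functions import apply_activation   # relative: resolved against the 'pyerualjetwork' package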
@@ -127,12 +127,13 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
 neurons_history=False, early_stop=False, show_history=False, target_loss=None,
 interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
-start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], parallel_training=True, decision_boundary_history=False, cuda=False, dtype=np.float32):
+start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], parallel_training=False,
+thread_count=cpu_count(), decision_boundary_history=False, cuda=False, dtype=np.float32):
 """
 Optimizes the activation functions for a neural network by leveraging train data to find
 the most accurate combination of activation potentiation(or activation function) & weight values for the labeled classificaiton dataset.
 
-Why genetic optimization ENE(Eugenic NeuroEvolution) and not backpropagation?
+Why genetic optimization FBN(Fitness Biased NeuroEvolution) and not backpropagation?
 Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
 Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: ENE.
 
@@ -193,8 +194,11 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 :param decision_boundary_history: (bool, optional): At the end of the training, the decision boundary history is shown in animation form. Note: If you are sure of your memory, set it to True. Default: False
 :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP or PTNN. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
 :param dtype: (numpy.dtype): Data type for the Weight matrices. np.float32 by default. Example: np.float64 or np.float16.
-:param parallel_training: (bool, optional): Parallel training process ? (automaticly selects maximum thread count of your system) Default = True.
+:param parallel_training: (bool, optional): Parallel training process ? Default = False.
+    - Recommended for large populations (pop_size) to significantly speed up training.
+:param thread_count: (int, optional): If parallel_training is True you can give max thread number for parallel training. Default: (automaticly selects maximum thread count of your system).
 :param cuda: (bool, optional): CUDA GPU acceleration ? Default = False.
+    - Recommended for large neural net architectures or big datasets to significantly speed up training.
 
 Returns:
     tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
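
Taken together, a call that opts into the new threaded path might look like the sketch below. `x_train`, `y_train`, and `my_optimizer` are placeholders for your own data and optimizer (see the user manual linked above for the real wiring); the keyword arguments are the ones this hunk introduces or re-defaults:

    import numpy as np
    from multiprocessing import cpu_count
    from pyerualjetwork import nn

    # x_train, y_train, my_optimizer are placeholders -- supply your own.
    model = nn.learn(
        x_train, y_train, my_optimizer,
        gen=30, pop_size=100,
        parallel_training=True,                 # default is now False; opt in for large populations
        thread_count=max(1, cpu_count() - 1),   # new in 5.55: cap the worker pool
        cuda=False,                             # enable for large nets or big datasets
        dtype=np.float32,
    )

Because the parallel path spins up a multiprocessing pool, on spawn-start platforms such as Windows the call should sit under an `if __name__ == '__main__':` guard.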
@@ -367,7 +371,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 eval_params = []
 fit_params = []
 
-with Pool(processes=cpu_count()) as pool:
+with Pool(processes=thread_count) as pool:
 
     batches = pool.starmap(batcher, [(x_train, y_train)] * pop_size)
 
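
This hunk only changes the pool size: `starmap` already fans one `(x_train, y_train)` tuple out per population member, and `processes=` now honors the caller's `thread_count` instead of unconditionally claiming every core. Despite the parameter's name, these are worker processes, not threads; `multiprocessing` sidesteps the GIL at the cost of pickling each argument tuple. A self-contained sketch of the same pattern, with a stand-in `batcher` (the library's real helper lives elsewhere):

    from multiprocessing import Pool, cpu_count
    import numpy as np

    def batcher(x, y, batch_size=0.5):
        # Stand-in: draw a random subset so each candidate sees its own batch.
        idx = np.random.choice(len(x), max(1, int(len(x) * batch_size)), replace=False)
        return x[idx], y[idx]

    if __name__ == '__main__':
        x = np.random.rand(1000, 4)
        y = np.eye(3)[np.random.randint(0, 3, size=1000)]   # one-hot labels
        pop_size = 8
        thread_count = max(1, cpu_count() - 1)
        # One argument tuple per population member, spread across at most
        # thread_count worker processes.
        with Pool(processes=thread_count) as pool:
            batches = pool.starmap(batcher, [(x, y)] * pop_size)
        print(len(batches), batches[0][0].shape)            # 8 (500, 4)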
@@ -441,27 +445,6 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 if show_current_activations:
     print(f", Current Activations={final_activations}", end='')
 
-# Update visualizations during training
-if show_history:
-    gen_list = range(1, len(best_acc_per_gen_list) + 2)
-    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
-                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
-
-if neurons_history:
-    viz_objects['neurons']['artists'] = (
-        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
-                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
-                                          y_train[0], viz_objects['neurons']['artists'],
-                                          data=data, fig1=viz_objects['neurons']['fig'],
-                                          acc=best_acc, loss=train_loss)
-    )
-
-if neural_web_history:
-    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
-                                             G=viz_objects['web']['G'], return_objs=True)
-    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
-    viz_objects['web']['artists'].append(art5_list)
-
 progress.update(1)
 
 else:
@@ -522,27 +505,6 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 if show_current_activations:
     print(f", Current Activations={final_activations}", end='')
 
-# Update visualizations during training
-if show_history:
-    gen_list = range(1, len(best_acc_per_gen_list) + 2)
-    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
-                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
-
-if neurons_history:
-    viz_objects['neurons']['artists'] = (
-        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
-                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
-                                          y_train[0], viz_objects['neurons']['artists'],
-                                          data=data, fig1=viz_objects['neurons']['fig'],
-                                          acc=best_acc, loss=train_loss)
-    )
-
-if neural_web_history:
-    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
-                                             G=viz_objects['web']['G'], return_objs=True)
-    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
-    viz_objects['web']['artists'].append(art5_list)
-
 # Check target accuracy
 if target_acc is not None and best_acc >= target_acc:
     progress.close()
@@ -611,9 +573,31 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 
 return model
 
-
+
 progress.update(1)
 
+# Update visualizations during training
+if show_history:
+    gen_list = range(1, len(best_acc_per_gen_list) + 2)
+    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
+
+if neurons_history:
+    viz_objects['neurons']['artists'] = (
+        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
+                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
+                                          y_train[0], viz_objects['neurons']['artists'],
+                                          data=data, fig1=viz_objects['neurons']['fig'],
+                                          acc=best_acc, loss=train_loss)
+    )
+
+if neural_web_history:
+    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
+                                             G=viz_objects['web']['G'], return_objs=True)
+    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+    viz_objects['web']['artists'].append(art5_list)
+
+
 if decision_boundary_history:
     if i == 0:
         fig, ax = create_decision_boundary_hist()
pyerualjetwork-5.55.dist-info/METADATA CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.54a4
-Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
+Version: 5.55
+Summary: PyereualJetwork is a GPU-accelerated + Parallel Threading Supported machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classification,potentiation learning artificial neural networks,NEAT,genetic algorithms,reinforcement learning,neural networks
@@ -61,7 +61,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
 
 ABOUT PYERUALJETWORK:
 
-PyereualJetwork is a large wide GPU-accelerated machine learning library in Python designed for professionals and researchers.
+PyereualJetwork is a large wide GPU-accelerated + Parallel Threading Supported machine learning library in Python designed for professionals and researchers.
 It features PLAN, MLP Deep Learning and PTNN training, as well as ENE (Eugenic NeuroEvolution) for genetic optimization,
 which can also be applied to genetic algorithms or Reinforcement Learning (RL) problems.
 The library includes functions for data pre-processing, visualizations, model saving and loading, prediction and evaluation,
pyerualjetwork-5.55.dist-info/RECORD CHANGED
@@ -1,11 +1,11 @@
-pyerualjetwork/__init__.py,sha256=d7tzdeWje0FkOPL_9la8YzHcPrBoROMM9kZgLt26X-w,3089
+pyerualjetwork/__init__.py,sha256=j2PJ8ap2XNId9eE4qknm8J4A7HrmCfw_r499HGMYSI4,3087
 pyerualjetwork/ene.py,sha256=luTvspHRTose6s3uRas40pNXyKoxU9siaHiMBNI5yoc,42136
 pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
 pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
 pyerualjetwork/issue_solver.py,sha256=uay_9XK6xWnLmK2P_BeyDQlyNXzg_zYffnXYd228wZk,4102
 pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
 pyerualjetwork/model_ops.py,sha256=WaP1XwKqXMfZl4Yop8a1Bg0xtmLYgap9JFOWHaLr7S4,25143
-pyerualjetwork/nn.py,sha256=EpYN3jCtYNfNCCPX0BN8JaL42JrkxLiopQkL6_j1Hio,42513
+pyerualjetwork/nn.py,sha256=nA26datlq7QMPm9BgpneivLlV2xHKzAMK4toYZeBChA,41409
 pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
 pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
@@ -21,7 +21,7 @@ pyerualjetwork/cuda/data_ops.py,sha256=BEXh4M7BWXaTpYlVS9D2i3CGgOmL5131vy7FZyuTQ
 pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
 pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
 pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
-pyerualjetwork-5.54a4.dist-info/METADATA,sha256=lgXzJZWTOmS-29lcI2QENAxRR-zXkP4GTVt8VdvzG7A,7990
-pyerualjetwork-5.54a4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-5.54a4.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-5.54a4.dist-info/RECORD,,
+pyerualjetwork-5.55.dist-info/METADATA,sha256=Y_ya6IN7QEh2EAPa34LalZccl-Gsiesh2JK-p3V28WE,8050
+pyerualjetwork-5.55.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.55.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.55.dist-info/RECORD,,
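
For readers checking these entries by hand: the `sha256=` values in RECORD are urlsafe-base64-encoded digests with the trailing `=` padding stripped, per the wheel spec (PEP 376/427). A quick spot-check against an unpacked 5.55 wheel (the path is illustrative):

    import base64
    import hashlib
    from pathlib import Path

    def record_digest(path):
        # RECORD format: sha256 digest, urlsafe base64, '=' padding removed.
        raw = hashlib.sha256(Path(path).read_bytes()).digest()
        return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')

    # Should match the RECORD line above for the 5.55 __init__.py:
    # j2PJ8ap2XNId9eE4qknm8J4A7HrmCfw_r499HGMYSI4
    print(record_digest('pyerualjetwork/__init__.py'))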