pyerualjetwork 4.3.7.dev1__tar.gz → 4.3.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/PKG-INFO +2 -19
  2. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/README.md +1 -18
  3. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/__init__.py +1 -1
  4. pyerualjetwork-4.3.8/pyerualjetwork/parallel.py +118 -0
  5. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/plan.py +18 -11
  6. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/plan_cuda.py +18 -13
  7. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/planeat_cuda.py +2 -1
  8. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/visualizations.py +27 -24
  9. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/visualizations_cuda.py +20 -22
  10. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork.egg-info/PKG-INFO +2 -19
  11. pyerualjetwork-4.3.8/pyerualjetwork.egg-info/SOURCES.txt +28 -0
  12. pyerualjetwork-4.3.8/pyerualjetwork.egg-info/top_level.txt +1 -0
  13. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/setup.py +1 -1
  14. pyerualjetwork-4.3.7.dev1/pyerualjetwork/activation_functions.py +0 -290
  15. pyerualjetwork-4.3.7.dev1/pyerualjetwork/activation_functions_cuda.py +0 -340
  16. pyerualjetwork-4.3.7.dev1/pyerualjetwork/data_operations_cuda.py +0 -461
  17. pyerualjetwork-4.3.7.dev1/pyerualjetwork/model_operations.py +0 -408
  18. pyerualjetwork-4.3.7.dev1/pyerualjetwork/model_operations_cuda.py +0 -421
  19. pyerualjetwork-4.3.7.dev1/pyerualjetwork/plan.py +0 -627
  20. pyerualjetwork-4.3.7.dev1/pyerualjetwork/plan_cuda.py +0 -648
  21. pyerualjetwork-4.3.7.dev1/pyerualjetwork/planeat.py +0 -825
  22. pyerualjetwork-4.3.7.dev1/pyerualjetwork/planeat_cuda.py +0 -834
  23. pyerualjetwork-4.3.7.dev1/pyerualjetwork/visualizations.py +0 -823
  24. pyerualjetwork-4.3.7.dev1/pyerualjetwork/visualizations_cuda.py +0 -825
  25. pyerualjetwork-4.3.7.dev1/pyerualjetwork.egg-info/SOURCES.txt +0 -47
  26. pyerualjetwork-4.3.7.dev1/pyerualjetwork.egg-info/top_level.txt +0 -2
  27. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/__init__.py +0 -11
  28. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/data_operations.py +0 -406
  29. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/help.py +0 -17
  30. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/loss_functions.py +0 -21
  31. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
  32. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/memory_operations.py +0 -298
  33. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/metrics.py +0 -190
  34. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/metrics_cuda.py +0 -163
  35. pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner/ui.py +0 -22
  36. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/activation_functions.py +0 -0
  37. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/activation_functions_cuda.py +0 -0
  38. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/data_operations.py +0 -0
  39. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/data_operations_cuda.py +0 -0
  40. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/help.py +0 -0
  41. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/loss_functions.py +0 -0
  42. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/loss_functions_cuda.py +0 -0
  43. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/memory_operations.py +0 -0
  44. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/metrics.py +0 -0
  45. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/metrics_cuda.py +0 -0
  46. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/model_operations.py +0 -0
  47. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/model_operations_cuda.py +0 -0
  48. {pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/planeat.py +0 -0
  49. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/ui.py +0 -0
  50. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
  51. {pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/setup.cfg +0 -0
{pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.7.dev1
+ Version: 4.3.8
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -25,8 +25,6 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
  YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7HbsBpCx2VTJ4SK9wcPyse-EHw
 
  pip install pyerualjetwork
-
- 'use this if your data small or memory management is a problem :'
 
  from pyerualjetwork import plan
  from pyerualjetwork import planeat
@@ -38,26 +36,11 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
  from pyerualjetwork import data_operations_cuda
  from pyerualjetwork import model_operations_cuda
 
- 'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
- afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
- Specially designed for LLM training and other massive model training)'
-
- from pyerualjetwork_afterburner import plan
- from pyerualjetwork_afterburner import planeat
- from pyerualjetwork_afterburner import data_operations
- from pyerualjetwork_afterburner import model_operations
-
- from pyerualjetwork_afterburner import plan_cuda
- from pyerualjetwork_afterburner import planeat_cuda
- from pyerualjetwork_afterburner import data_operations_cuda
- from pyerualjetwork_afterburner import model_operations_cuda
-
  Optimized for Visual Studio Code
 
  requires=[
      'scipy==1.13.1',
      'tqdm==4.66.4',
-     'seaborn==0.13.2',
      'pandas==2.2.2',
      'networkx==3.3',
      'numpy==1.26.4',
@@ -67,7 +50,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
      'psutil==6.1.1'
  ]
 
- matplotlib, seaborn, networkx (optional).
+ matplotlib, networkx (optional).
 
  ##############################
 
{pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/README.md
@@ -16,8 +16,6 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
  YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7HbsBpCx2VTJ4SK9wcPyse-EHw
 
  pip install pyerualjetwork
-
- 'use this if your data small or memory management is a problem :'
 
  from pyerualjetwork import plan
  from pyerualjetwork import planeat
@@ -29,26 +27,11 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
  from pyerualjetwork import data_operations_cuda
  from pyerualjetwork import model_operations_cuda
 
- 'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
- afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
- Specially designed for LLM training and other massive model training)'
-
- from pyerualjetwork_afterburner import plan
- from pyerualjetwork_afterburner import planeat
- from pyerualjetwork_afterburner import data_operations
- from pyerualjetwork_afterburner import model_operations
-
- from pyerualjetwork_afterburner import plan_cuda
- from pyerualjetwork_afterburner import planeat_cuda
- from pyerualjetwork_afterburner import data_operations_cuda
- from pyerualjetwork_afterburner import model_operations_cuda
-
  Optimized for Visual Studio Code
 
  requires=[
      'scipy==1.13.1',
      'tqdm==4.66.4',
-     'seaborn==0.13.2',
      'pandas==2.2.2',
      'networkx==3.3',
      'numpy==1.26.4',
@@ -58,7 +41,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
      'psutil==6.1.1'
  ]
 
- matplotlib, seaborn, networkx (optional).
+ matplotlib, networkx (optional).
 
  ##############################
 
{pyerualjetwork-4.3.7.dev1 → pyerualjetwork-4.3.8}/pyerualjetwork/__init__.py
@@ -1,4 +1,4 @@
- __version__ = "4.3.7dev1"
+ __version__ = "4.3.8"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
  def print_version(__version__):
pyerualjetwork-4.3.8/pyerualjetwork/parallel.py
@@ -0,0 +1,118 @@
+ import multiprocessing as mp
+ from functools import partial
+ import random
+ from .planeat import mutation, cross_over, second_parent_selection
+
+ def run_process(policy, best_weight, best_activations, good_weights,
+                 good_activations, bad_weights, bad_activations, best_fitness,
+                 normalized_fitness, child_W, child_act, mutated_W, mutated_act,
+                 cross_over_mode, activation_selection_add_prob,
+                 activation_selection_change_prob, activation_selection_threshold,
+                 bad_genomes_selection_prob, fitness_bias, epsilon,
+                 bad_genomes_mutation_prob, activation_mutate_prob,
+                 activation_mutate_add_prob, activation_mutate_delete_prob,
+                 activation_mutate_change_prob, weight_mutate_prob,
+                 weight_mutate_threshold, activation_mutate_threshold):
+
+     process_func = partial(process_single, policy=policy, best_weight=best_weight,
+                            best_activations=best_activations, good_weights=good_weights,
+                            good_activations=good_activations, bad_weights=bad_weights,
+                            bad_activations=bad_activations, best_fitness=best_fitness,
+                            normalized_fitness=normalized_fitness, child_W=child_W,
+                            child_act=child_act, mutated_W=mutated_W, mutated_act=mutated_act,
+                            cross_over_mode=cross_over_mode,
+                            activation_selection_add_prob=activation_selection_add_prob,
+                            activation_selection_change_prob=activation_selection_change_prob,
+                            activation_selection_threshold=activation_selection_threshold,
+                            bad_genomes_selection_prob=bad_genomes_selection_prob,
+                            fitness_bias=fitness_bias,
+                            epsilon=epsilon,
+                            bad_genomes_mutation_prob=bad_genomes_mutation_prob,
+                            activation_mutate_prob=activation_mutate_prob,
+                            activation_mutate_add_prob=activation_mutate_add_prob,
+                            activation_mutate_delete_prob=activation_mutate_delete_prob,
+                            activation_mutate_change_prob=activation_mutate_change_prob,
+                            weight_mutate_prob=weight_mutate_prob,
+                            weight_mutate_threshold=weight_mutate_threshold,
+                            activation_mutate_threshold=activation_mutate_threshold)
+
+     with mp.Pool() as pool:
+         results = pool.map(process_func, range(len(bad_weights)))
+
+     for i, new_child_W, new_child_act, new_mutated_W, new_mutated_act in results:
+         child_W[i] = new_child_W
+         child_act[i] = new_child_act
+         mutated_W[i] = new_mutated_W
+         mutated_act[i] = new_mutated_act
+
+
+     return child_W, child_act, mutated_W, mutated_act
+
+
+ def process_single(i, policy, best_weight, best_activations,
+                    good_weights, good_activations,
+                    bad_weights, bad_activations,
+                    best_fitness, normalized_fitness,
+                    cross_over_mode,
+                    activation_selection_add_prob, activation_selection_change_prob,
+                    activation_selection_threshold,
+                    bad_genomes_selection_prob, fitness_bias,
+                    epsilon, bad_genomes_mutation_prob,
+                    activation_mutate_prob, activation_mutate_add_prob,
+                    activation_mutate_delete_prob, activation_mutate_change_prob,
+                    weight_mutate_prob, weight_mutate_threshold,
+                    activation_mutate_threshold):
+
+     if policy == 'aggressive':
+         first_parent_W = best_weight
+         first_parent_act = best_activations
+     elif policy == 'explorer':
+         first_parent_W = good_weights[i]
+         first_parent_act = good_activations[i]
+     else:
+         raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")
+
+     second_parent_W, second_parent_act, s_i = second_parent_selection(
+         good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob)
+
+     new_child_W, new_child_act = cross_over(
+         first_parent_W,
+         second_parent_W,
+         first_parent_act,
+         second_parent_act,
+         cross_over_mode=cross_over_mode,
+         activation_selection_add_prob=activation_selection_add_prob,
+         activation_selection_change_prob=activation_selection_change_prob,
+         activation_selection_threshold=activation_selection_threshold,
+         bad_genomes_selection_prob=bad_genomes_selection_prob,
+         first_parent_fitness=best_fitness,
+         fitness_bias=fitness_bias,
+         second_parent_fitness=normalized_fitness[s_i],
+         epsilon=epsilon
+     )
+
+     mutation_prob = random.uniform(0, 1)
+     if mutation_prob > bad_genomes_mutation_prob:
+         genome_W = good_weights[i]
+         genome_act = good_activations[i]
+         fitness_index = int(len(bad_weights) / 2 + i)
+     else:
+         genome_W = bad_weights[i]
+         genome_act = bad_activations[i]
+         fitness_index = i
+
+     new_mutated_W, new_mutated_act = mutation(
+         genome_W,
+         genome_act,
+         activation_mutate_prob=activation_mutate_prob,
+         activation_add_prob=activation_mutate_add_prob,
+         activation_delete_prob=activation_mutate_delete_prob,
+         activation_change_prob=activation_mutate_change_prob,
+         weight_mutate_prob=weight_mutate_prob,
+         weight_mutate_threshold=weight_mutate_threshold,
+         genome_fitness=normalized_fitness[fitness_index],
+         activation_mutate_threshold=activation_mutate_threshold,
+         epsilon=epsilon
+     )
+
+     return (i, new_child_W, new_child_act, new_mutated_W, new_mutated_act)
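The new parallel.py follows a standard multiprocessing fan-out pattern: shared arguments are frozen with functools.partial, the per-genome index is mapped across a process pool, and each worker's result is written back into a pre-allocated buffer by that index (pool.map preserves input order, which is what makes indexed write-back safe). Below is a minimal, self-contained sketch of the pattern; the worker function and its data are hypothetical stand-ins, not the library's real genome structures.

import multiprocessing as mp
from functools import partial

def work(i, scale):
    # stand-in for process_single: heavy per-genome work, returns (index, result)
    return i, i * scale

if __name__ == '__main__':
    results_buffer = [None] * 4         # pre-allocated, filled back by index
    func = partial(work, scale=10)      # freeze the shared arguments
    with mp.Pool() as pool:
        for i, value in pool.map(func, range(4)):
            results_buffer[i] = value
    print(results_buffer)               # [0, 10, 20, 30]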
{pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/plan.py
@@ -73,12 +73,12 @@ def fit(
      LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
      x_train = apply_activation(x_train, activation_potentiation)
-     LTPW += np.array(y_train, dtype=optimize_labels(y_train, cuda=False).dtype).T @ x_train
+     LTPW += y_train.T @ x_train
 
      return normalization(LTPW, dtype=dtype)
 
 
- def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
+ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
              neural_web_history=False, show_current_activations=False,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
              interval=33.33, target_acc=None, target_loss=None,
@@ -123,6 +123,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
+     pop_size (int, optional): Population size of each generation. Default: count of activation functions
+
      early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
 
      show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
@@ -156,14 +158,19 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      data = 'Train'
 
-     activation_potentiation = all_activations()
+     except_this = ['spiral', 'circular']
+     activation_potentiation = [item for item in all_activations() if item not in except_this]
      activation_potentiation_len = len(activation_potentiation)
 
      # Pre-checks
 
+     if pop_size is None: pop_size = activation_potentiation_len
+
      x_train = x_train.astype(dtype, copy=False)
      y_train = optimize_labels(y_train, cuda=False)
 
+     if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+
      if gen is None:
          gen = activation_potentiation_len
 
@@ -192,16 +199,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
 
-     if fit_start is False:
-         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)
+     if fit_start is False or pop_size > activation_potentiation_len:
+         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
 
          if start_this_act is not None and start_this_W is not None:
              weight_pop[0] = start_this_W
              act_pop[0] = start_this_act
 
      else:
-         weight_pop = []
-         act_pop = []
+         weight_pop = [0] * pop_size
+         act_pop = [0] * pop_size
 
      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -211,14 +218,14 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          progress.last_print_n = 0
          progress.update(0)
 
-         for j in range(activation_potentiation_len):
+         for j in range(pop_size):
 
              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
-             if fit_start is True and i == 0:
-                 act_pop.append(activation_potentiation[j])
+             if fit_start is True and i == 0 and j < activation_potentiation_len:
+                 act_pop[j] = activation_potentiation[j]
                  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
-                 weight_pop.append(W)
+                 weight_pop[j] = W
 
              model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
              acc = model[get_acc()]
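The fit() change above removes a redundant dtype cast: y_train is now expected to arrive one-hot encoded with a usable dtype, so the potentiation step reduces to a single outer-product accumulation. A minimal NumPy sketch of that rule with made-up toy data (an illustration, not the library's API):

import numpy as np

x_train = np.array([[0.2, 0.8, 0.0],
                    [0.9, 0.1, 0.5]])           # 2 samples, 3 features
y_train = np.array([[1, 0],
                    [0, 1]], dtype=np.float32)  # one-hot labels, 2 classes

LTPW = np.zeros((y_train.shape[1], x_train.shape[1]))
LTPW += y_train.T @ x_train  # each class row accumulates its own samples' features
print(LTPW)  # row 0 == sample 0's features, row 1 == sample 1's features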
{pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/plan_cuda.py
@@ -71,12 +71,12 @@ def fit(
      LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
      x_train = apply_activation(x_train, activation_potentiation)
-     LTPW += cp.array(y_train, dtype=optimize_labels(y_train, cuda=True).dtype).T @ x_train
+     LTPW += y_train.T @ x_train
 
      return normalization(LTPW, dtype=dtype)
 
 
- def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
+ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
              neural_web_history=False, show_current_activations=False,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
              interval=33.33, target_acc=None, target_loss=None,
@@ -120,6 +120,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
+     pop_size (int, optional): Population size of each generation. Default: count of activation functions
+
      early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
 
      show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
@@ -129,7 +131,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
      loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
      interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
-
+
      target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
 
      target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
@@ -155,12 +157,15 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      data = 'Train'
 
-     activation_potentiation = all_activations()
-
+     except_this = ['spiral', 'circular']
+     activation_potentiation = [item for item in all_activations() if item not in except_this]
      activation_potentiation_len = len(activation_potentiation)
 
+     if pop_size is None: pop_size = activation_potentiation_len
      y_train = optimize_labels(y_train, cuda=True)
 
+     if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+
      if memory == 'gpu':
          x_train = transfer_to_gpu(x_train, dtype=dtype)
          y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -201,16 +206,16 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
 
      progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
 
-     if fit_start is False:
-         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)
+     if fit_start is False or pop_size > activation_potentiation_len:
+         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
 
          if start_this_act is not None and start_this_W is not None:
              weight_pop[0] = start_this_W
              act_pop[0] = start_this_act
 
      else:
-         weight_pop = []
-         act_pop = []
+         weight_pop = [0] * pop_size
+         act_pop = [0] * pop_size
 
      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -220,17 +225,17 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          progress.last_print_n = 0
          progress.update(0)
 
-         for j in range(activation_potentiation_len):
+         for j in range(pop_size):
 
              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
              x_train_batch = cp.array(x_train_batch, dtype=dtype, copy=False)
              y_train_batch = cp.array(y_train_batch, dtype=dtype, copy=False)
 
-             if fit_start is True and i == 0:
-                 act_pop.append(activation_potentiation[j])
+             if fit_start is True and i == 0 and j < activation_potentiation_len:
+                 act_pop[j] = activation_potentiation[j]
                  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
-                 weight_pop.append(W)
+                 weight_pop[j] = W
 
              model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
              acc = model[get_acc()]
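Both learner() variants gain the same pop_size semantics: default to the number of candidate activation functions, reject anything smaller, and pre-allocate the population lists so the generation loop can assign by index instead of appending. A minimal sketch of that logic in isolation; the activation count is a guessed stand-in for illustration:

activation_potentiation_len = 3  # stand-in for the filtered activation count

def resolve_population(pop_size=None):
    if pop_size is None:
        pop_size = activation_potentiation_len
    if pop_size < activation_potentiation_len:
        raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
    weight_pop = [0] * pop_size  # filled by index inside the generation loop
    act_pop = [0] * pop_size
    return pop_size, weight_pop, act_pop

print(resolve_population())   # (3, [0, 0, 0], [0, 0, 0])
print(resolve_population(5))  # (5, [0, 0, 0, 0, 0], [0, 0, 0, 0, 0])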
{pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/planeat_cuda.py
@@ -722,7 +722,8 @@ def mutation(weight,
 
      new_threshold = threshold
 
-     all_acts = all_activations()
+     except_this = ['spiral', 'circular']
+     all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
 
      activation_add_prob = 1 - activation_add_prob
      activation_delete_prob = 1 - activation_delete_prob
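The mutation() hunk combines two small idioms: filtering the activation list, and the complement-then-compare pattern for deciding whether a mutation fires. A minimal sketch of both, with a hypothetical stand-in for the library's all_activations():

import random

def all_activations():
    # hypothetical stand-in for the library's activation name list
    return ['relu', 'sigmoid', 'tanh', 'spiral', 'circular']

except_this = ['spiral', 'circular']
all_acts = [a for a in all_activations() if a not in except_this]

activation_add_prob = 0.5             # user-facing probability
threshold = 1 - activation_add_prob   # complement, as in the hunk
if random.uniform(0, 1) > threshold:  # fires with probability activation_add_prob
    print('added activation:', random.choice(all_acts))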
{pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/visualizations.py
@@ -2,7 +2,6 @@ import networkx as nx
  import matplotlib.pyplot as plt
  import numpy as np
  from scipy.spatial import ConvexHull
- import seaborn as sns
  from matplotlib.animation import ArtistAnimation
 
  def draw_neural_web(W, ax, G, return_objs=False):
@@ -323,7 +322,8 @@ def draw_activations(x_train, activation):
      except:
          print('\rWARNING: error in drawing some activation.', end='')
          return x_train
-
+
+ """ DISABLED
  def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
 
      from .metrics import metrics, confusion_matrix, roc_curve
@@ -451,7 +451,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
          print(f"Hata oluştu: {e}")
 
      plt.show()
-
+ """
 
  def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=None):
 
@@ -614,9 +614,9 @@ def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=F
 
      fig1.suptitle(suptitle_info, fontsize=16)
 
-
+ """ DISABLED
  def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
-     """Initializes the visualization setup based on the parameters."""
+
      from .data_operations import find_closest_factors
      visualization_objects = {}
 
@@ -649,32 +649,33 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_trai
      })
 
      return visualization_objects
-
+ """
 
-
+ """ DISABLED
  def update_neural_web_for_fit(W, ax, G, artist):
-     """
-     The function `update_neural_web_for_fit` updates a neural web visualization for fitting.
-     """
+
      art5_1, art5_2, art5_3 = draw_neural_web(W=W, ax=ax, G=G, return_objs=True)
      art5_list = [art5_1] + [art5_2] + list(art5_3.values())
      artist.append(art5_list)
-
-
+ """
+
+ """ DISABLED
  def update_weight_visualization_for_fit(ax, LTPW, artist2):
-     """Updates the weight visualization plot."""
+
      art2 = ax.imshow(LTPW, interpolation='sinc', cmap='viridis')
      artist2.append([art2])
+ """
 
-
+ """ DISABLED
  def update_decision_boundary_for_fit(ax, x_val, y_val, activation_potentiation, LTPW, artist1):
-     """Updates the decision boundary visualization."""
+
      art1_1, art1_2 = plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW, artist=artist1, ax=ax)
      artist1.append([*art1_1.collections, art1_2])
+ """
 
-
+ """ DISABLED
  def update_validation_history_for_fit(ax, val_list, artist3):
-     """Updates the validation accuracy history plot."""
+
      period = list(range(1, len(val_list) + 1))
      art3 = ax.plot(
          period,
@@ -691,19 +692,21 @@ def update_validation_history_for_fit(ax, val_list, artist3):
      ax.set_ylabel('Validation Accuracy')
      ax.set_ylim([0, 1])
      artist3.append(art3)
-
-
+ """
+
+ """ DISABLED
  def display_visualization_for_fit(fig, artist_list, interval):
-     """Displays the animation for the given artist list."""
+
      ani = ArtistAnimation(fig, artist_list, interval=interval, blit=True)
      return ani
-
+ """
+
  def show():
      plt.tight_layout()
      plt.show()
 
  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
-     """Initialize all visualization components"""
+
      from .data_operations import find_closest_factors
      viz_objects = {}
 
@@ -745,7 +748,7 @@ def initialize_visualization_for_learner(show_history, neurons_history, neural_w
      return viz_objects
 
  def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_acc_per_depth_list, x_train, final_activations):
-     """Update history visualization plots"""
+
      if 'history' not in viz_objects:
          return
 
@@ -772,7 +775,7 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
      hist['artist3'].append(art3)
 
  def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval):
-     """Display all final visualizations"""
+
      if 'history' in viz_objects:
          hist = viz_objects['history']
          for _ in range(30):
{pyerualjetwork-4.3.7.dev1/pyerualjetwork_afterburner → pyerualjetwork-4.3.8/pyerualjetwork}/visualizations_cuda.py
@@ -2,7 +2,6 @@ import networkx as nx
  import matplotlib.pyplot as plt
  import cupy as cp
  from scipy.spatial import ConvexHull
- import seaborn as sns
  from matplotlib.animation import ArtistAnimation
 
  def draw_neural_web(W, ax, G, return_objs=False):
@@ -325,7 +324,7 @@ def draw_activations(x_train, activation):
          print('\rWARNING: error in drawing some activation.', end='')
          return x_train
 
-
+ """ DISABLED
  def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
 
      from .metrics_cuda import metrics, confusion_matrix, roc_curve
@@ -447,7 +446,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
      axs[1,1].set_title('Decision Boundary')
 
      plt.show()
-
+ """
 
  def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=None):
 
@@ -583,9 +582,8 @@ def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=F
 
      fig1.suptitle(suptitle_info, fontsize=16)
 
-
+ """ DISABLED
  def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
-     """Initializes the visualization setup based on the parameters."""
      from .data_operations_cuda import find_closest_factors
      visualization_objects = {}
 
@@ -618,33 +616,34 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_trai
      })
 
      return visualization_objects
+ """
+
 
-
+ """ DISABLED
  def update_weight_visualization_for_fit(ax, LTPW, artist2):
-     """Updates the weight visualization plot."""
      art2 = ax.imshow(LTPW.get(), interpolation='sinc', cmap='viridis')
      artist2.append([art2])
+ """
 
  def show():
      plt.tight_layout()
      plt.show()
 
+ """ DISABLED
  def update_neural_web_for_fit(W, ax, G, artist):
-     """
-     The function `update_neural_web_for_fit` updates a neural web visualization for fitting.
-     """
      art5_1, art5_2, art5_3 = draw_neural_web(W=W, ax=ax, G=G, return_objs=True)
      art5_list = [art5_1] + [art5_2] + list(art5_3.values())
      artist.append(art5_list)
-
+ """
+
+ """ DISABLED
  def update_decision_boundary_for_fit(ax, x_val, y_val, activation_potentiation, LTPW, artist1):
-     """Updates the decision boundary visualization."""
      art1_1, art1_2 = plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW, artist=artist1, ax=ax)
      artist1.append([*art1_1.collections, art1_2])
+ """
 
-
+ """ DISABLED
  def update_validation_history_for_fit(ax, val_list, artist3):
-     """Updates the validation accuracy history plot."""
      val_list_cpu = []
      for i in range(len(val_list)):
          val_list_cpu.append(val_list[i].get())
@@ -664,13 +663,12 @@ def update_validation_history_for_fit(ax, val_list, artist3):
      ax.set_ylabel('Validation Accuracy')
      ax.set_ylim([0, 1])
      artist3.append(art3)
-
-
+ """
+ """ DISABLED
  def display_visualization_for_fit(fig, artist_list, interval):
-     """Displays the animation for the given artist list."""
      ani = ArtistAnimation(fig, artist_list, interval=interval, blit=True)
      return ani
-
+ """
 
  def update_neuron_history_for_learner(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):
      for j in range(len(class_count)):
@@ -699,7 +697,7 @@ def update_neuron_history_for_learner(LTPW, ax1, row, col, class_count, artist5,
      return artist5
 
  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
-     """Initialize all visualization components"""
+
      from .data_operations_cuda import find_closest_factors
      viz_objects = {}
 
@@ -741,7 +739,7 @@ def initialize_visualization_for_learner(show_history, neurons_history, neural_w
      return viz_objects
 
  def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_acc_per_depth_list, x_train, final_activations):
-     """Update history visualization plots"""
+
      if 'history' not in viz_objects:
          return
 
@@ -770,11 +768,11 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
      translated_x_train += draw_activations(x, activation)
 
      art3 = hist['ax'][2].plot(x.get(), translated_x_train.get(), color='b', markersize=6, linewidth=2)
-     hist['ax'][2].set_title('Potentiation Shape Over Gen')
+     hist['ax'][2].set_title('Activation Shape Over Gen')
      hist['artist3'].append(art3)
 
  def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval):
-     """Display all final visualizations"""
+
      if 'history' in viz_objects:
          hist = viz_objects['history']
          for _ in range(30):