pyerualjetwork 4.8__py3-none-any.whl → 5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,18 +1,33 @@
1
1
  # -*- coding: utf-8 -*-
2
- """
2
+ """
3
+
4
+
5
+ NEU (Neural Networks)
6
+ =====================
7
+ This module hosts functions for training and evaluating artificial neural networks for labeled classification tasks (for now).
8
+
9
+ Currently, two types of models can be trained:
10
+
11
+ PLAN (Potentiation Learning Artificial Neural Network)
12
+ MLP (Multi-Layer Perceptron → Deep Learning) -- without bias
13
+
14
+ For more information about PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
3
15
 
4
- MAIN MODULE FOR PLAN
16
+ Module functions:
17
+ -----------------
18
+ - plan_fit()
19
+ - learn()
20
+ - evaluate()
5
21
 
6
22
  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
7
23
 
8
- PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
9
- PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
24
+ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
10
25
 
11
- @author: Hasan Can Beydili
12
- @YouTube: https://www.youtube.com/@HasanCanBeydili
13
- @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
14
- @Instagram: https://www.instagram.com/canbeydilj/
15
- @contact: tchasancan@gmail.com
26
+ - Author: Hasan Can Beydili
27
+ - YouTube: https://www.youtube.com/@HasanCanBeydili
28
+ - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
29
+ - Instagram: https://www.instagram.com/canbeydilj
30
+ - Contact: tchasancan@gmail.com
16
31
  """
17
32
 
18
33
  import numpy as np
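The hunk above renames the module's public entry points (`fit` → `plan_fit`, `learner` → `learn`) and points the optimizer at the new `ene` module. Below is a minimal, hedged sketch of a training script against the 5.x names; the `from pyerualjetwork import neu, ene` import path and the toy data are assumptions inferred from the docstring, not verified against the released wheel.

```python
# Hedged sketch of the renamed 5.x API (plan_fit / learn / evaluate, ene.evolver).
# Import paths and toy data are assumptions inferred from the docstring above.
import numpy as np
from pyerualjetwork import neu, ene  # assumed module layout

# Toy one-hot classification data
x_train = np.random.rand(120, 8).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 120)]

# ENE optimizer wrapper, following the pattern recommended in the docstring
optimizer = lambda *args, **kwargs: ene.evolver(*args,
                                                activation_add_prob=0.05,
                                                strategy='aggressive',
                                                policy='more_selective',
                                                **kwargs)

# PLAN training (neurons=[] keeps the PLAN architecture); returns
# [Weight matrix, Train Preds, Train Accuracy, [Activation functions]]
model = neu.learn(x_train, y_train, optimizer, fit_start=True, gen=2, batch_size=1)
```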
@@ -40,23 +55,23 @@ bar_format_learner = loading_bars()[1]
40
55
 
41
56
  # BUILD -----
42
57
 
43
- def fit(
58
+ def plan_fit(
44
59
  x_train,
45
60
  y_train,
46
- activation_potentiation=['linear'],
61
+ activations=['linear'],
47
62
  W=None,
48
63
  auto_normalization=False,
49
64
  dtype=np.float32
50
65
  ):
51
66
  """
52
- Creates a model to fitting data.
67
+ Creates a PLAN model to fit the data.
53
68
 
54
69
  Args:
55
70
  x_train (aray-like[num]): List or numarray of input data.
56
71
 
57
72
  y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
58
73
 
59
- activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
74
+ activations (list): For deeper PLAN networks, activation function parameters. For more information, run: plan.activations_list(). Default: ['linear'] (optional)
60
75
 
61
76
  W (numpy.ndarray): If you want to re-continue or update model
62
77
 
@@ -74,8 +89,8 @@ def fit(
74
89
 
75
90
  weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
76
91
 
77
- if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
78
- elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
92
+ if auto_normalization is True: x_train = normalization(apply_activation(x_train, activations))
93
+ elif auto_normalization is False: x_train = apply_activation(x_train, activations)
79
94
  else: raise ValueError('normalization parameter only be True or False')
80
95
 
81
96
  weight += y_train.T @ x_train
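Stripped of the renamed keyword, the `plan_fit` body shown in this hunk applies the activation stack, optionally normalizes, accumulates `y_train.T @ x_train` into the weight matrix, and normalizes the result. A self-contained NumPy sketch of that update rule follows; the identity activation and the max-scaling normalization are stand-ins for the library's `apply_activation`/`normalization` helpers, whose exact behavior is not shown in this diff.

```python
# Standalone sketch of the plan_fit update rule visible in this hunk:
# the weight matrix accumulates y_train.T @ x_train and is normalized at the end.
import numpy as np

def _normalization(w, dtype=np.float32):
    # stand-in for the library helper: scale by the largest absolute value (assumed)
    return (w / np.max(np.abs(w))).astype(dtype, copy=False)

def plan_fit_sketch(x_train, y_train, W=None, dtype=np.float32):
    x_train = np.asarray(x_train, dtype=dtype)           # inputs (activation stack omitted)
    y_train = np.asarray(y_train, dtype=dtype)           # one-hot labels
    weight = np.zeros((y_train.shape[1], x_train.shape[1]), dtype=dtype) if W is None else W
    weight += y_train.T @ x_train                        # potentiation step
    return _normalization(weight, dtype=dtype)

x = np.random.rand(10, 4).astype(np.float32)
y = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 10)]
W = plan_fit_sketch(x, y)                                # shape (2, 4)
```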
@@ -83,11 +98,11 @@ def fit(
83
98
  return normalization(weight, dtype=dtype)
84
99
 
85
100
 
86
- def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
101
+ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
87
102
  weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
88
103
  neurons_history=False, early_stop=False, show_history=False, target_loss=None,
89
104
  interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
90
- start_this_act=None, start_this_W=None, neurons=None, hidden=0, activation_functions=None, dtype=np.float32):
105
+ start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], dtype=np.float32):
91
106
  """
92
107
  Optimizes the activation functions for a neural network by leveraging train data to find
93
108
  the most accurate combination of activation potentiation(or activation function) & weight values for the given dataset.
@@ -99,15 +114,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
99
114
  :Args:
100
115
  :param x_train: (array-like): Training input data.
101
116
  :param y_train: (array-like): Labels for training data. one-hot encoded.
102
- :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
117
+ :param optimizer: (function): Optimization technique with hyperparameters. (Both PLAN and MLP use ENE for optimization.) Please use this: from pyerualjetwork.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your ENE hyperparameters, for example: activation_add_prob=0.85', **kwargs) Example:
103
118
  ```python
104
- optimizer = lambda *args, **kwargs: planeat.evolver(*args,
119
+ optimizer = lambda *args, **kwargs: ene.evolver(*args,
105
120
  activation_add_prob=0.05,
106
121
  strategy='aggressive',
107
122
  policy='more_selective',
108
123
  **kwargs)
109
124
 
110
- model = plan.learner(x_train,
125
+ model = neu.learn(x_train,
111
126
  y_train,
112
127
  optimizer,
113
128
  fit_start=True,
@@ -135,40 +150,40 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
135
150
  :param loss_impact: (float, optional): Impact of loss for optimization [0-1]. Default: 0.1
136
151
  :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
137
152
  :param start_this_W: (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
138
- :param hidden: (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
139
- :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
140
- :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
153
+ :param neurons: (list[int], optional): If you do not want to train a PLAN model, this parameter represents the neuron count of each hidden layer for MLP. Number of elements --> layer count. Default: [] (no hidden layers) --> architecture is set to PLAN; otherwise --> architecture is set to MLP.
154
+ :param activation_functions: (list[str], optional): If you do not want to train a PLAN model, this parameter represents the activation function of each hidden layer for MLP. If neurons is not [] --> defaults to ['linear'] * len(neurons). If neurons is [] --> uses [].
141
155
  :param dtype: (numpy.dtype): Data type for the Weight matrices. np.float32 by default. Example: np.float64 or np.float16.
142
156
 
143
157
  Returns:
144
- tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
158
+ tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activation functions]].
145
159
  """
146
160
 
147
- from .planeat import define_genomes
161
+ from .ene import define_genomes
148
162
 
149
163
  data = 'Train'
150
164
 
151
165
  except_this = ['spiral', 'circular']
152
- activation_potentiation = [item for item in all_activations() if item not in except_this]
153
- activation_potentiation_len = len(activation_potentiation)
166
+ activations = [item for item in all_activations() if item not in except_this]
167
+ activations_len = len(activations)
154
168
 
155
169
  # Pre-checks
156
170
 
157
- if pop_size is None: pop_size = activation_potentiation_len
171
+ if pop_size is None: pop_size = activations_len
158
172
  y_train = optimize_labels(y_train, cuda=False)
159
173
 
160
- if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
174
+ if pop_size < activations_len: raise ValueError(f"pop_size must be higher or equal to {activations_len}")
161
175
 
162
- if gen is None: gen = activation_potentiation_len
176
+ if gen is None: gen = activations_len
163
177
 
164
178
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
165
179
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
166
180
 
167
- if hidden is not 0 and neurons is not None and activation_functions is not None:
181
+ if neurons != []:
168
182
  fit_start = False
169
183
  weight_evolve = True
170
184
  is_mlp = True
171
- activation_potentiation = activation_functions
185
+ if activation_functions == []: activation_functions = ['linear'] * len(neurons)
186
+ activations = activation_functions
172
187
  else:
173
188
  is_mlp = False
174
189
 
@@ -184,10 +199,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
184
199
  loss_list = []
185
200
  target_pop = []
186
201
 
187
- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=77, bar_format=bar_format_learner)
202
+ progress = initialize_loading_bar(total=activations_len, desc="", ncols=77, bar_format=bar_format_learner)
188
203
 
189
- if fit_start is False or pop_size > activation_potentiation_len:
190
- weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, hidden=hidden, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)
204
+ if fit_start is False or pop_size > activations_len:
205
+ weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)
191
206
 
192
207
  else:
193
208
  weight_pop = [0] * pop_size
@@ -210,18 +225,18 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
210
225
 
211
226
  x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
212
227
 
213
- if fit_start is True and i == 0 and j < activation_potentiation_len:
228
+ if fit_start is True and i == 0 and j < activations_len:
214
229
  if start_this_act is not None and j == 0:
215
230
  pass
216
231
  else:
217
- act_pop[j] = activation_potentiation[j]
218
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
232
+ act_pop[j] = activations[j]
233
+ W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
219
234
  weight_pop[j] = W
220
235
 
221
236
  if weight_evolve is False:
222
- weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
237
+ weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
223
238
 
224
- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
239
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
225
240
  acc = model[get_acc()]
226
241
 
227
242
  if loss == 'categorical_crossentropy':
@@ -278,7 +293,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
278
293
  if target_acc is not None and best_acc >= target_acc:
279
294
  progress.close()
280
295
  train_model = evaluate(x_train, y_train, W=best_weight,
281
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
296
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
282
297
  if loss == 'categorical_crossentropy':
283
298
  train_loss = categorical_crossentropy(y_true_batch=y_train,
284
299
  y_pred_batch=train_model[get_preds_softmax()])
@@ -298,7 +313,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
298
313
  if target_loss is not None and best_loss <= target_loss:
299
314
  progress.close()
300
315
  train_model = evaluate(x_train, y_train, W=best_weight,
301
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
316
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
302
317
 
303
318
  if loss == 'categorical_crossentropy':
304
319
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -351,7 +366,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
351
366
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
352
367
  progress.close()
353
368
  train_model = evaluate(x_train, y_train, W=best_weight,
354
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
369
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
355
370
 
356
371
  if loss == 'categorical_crossentropy':
357
372
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -373,7 +388,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
373
388
  progress.close()
374
389
 
375
390
  train_model = evaluate(x_train, y_train, W=best_weight,
376
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
391
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
377
392
 
378
393
  if loss == 'categorical_crossentropy':
379
394
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -395,7 +410,7 @@ def evaluate(
395
410
  x_test,
396
411
  y_test,
397
412
  W,
398
- activation_potentiation=['linear'],
413
+ activations=['linear'],
399
414
  auto_normalization=False,
400
415
  is_mlp=False
401
416
  ) -> tuple:
@@ -409,7 +424,7 @@ def evaluate(
409
424
 
410
425
  W (np.ndarray): Neural net weight matrix.
411
426
 
412
- activation_potentiation (list, optional): Activation list. Default = ['linear'].
427
+ activations (list, optional): Activation list. Default = ['linear'].
413
428
 
414
429
  auto_normalization (bool, optional): Normalization for x_test ? Default = False.
415
430
 
@@ -421,15 +436,15 @@ def evaluate(
421
436
 
422
437
  if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
423
438
 
424
- if isinstance(activation_potentiation, str):
425
- activation_potentiation = [activation_potentiation]
426
- elif isinstance(activation_potentiation, list):
427
- activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
439
+ if isinstance(activations, str):
440
+ activations = [activations]
441
+ elif isinstance(activations, list):
442
+ activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
428
443
 
429
444
  if is_mlp:
430
445
  layer = x_test
431
446
  for i in range(len(W)):
432
- if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_potentiation[i])
447
+ if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
433
448
 
434
449
  layer = layer @ W[i].T
435
450
 
@@ -437,7 +452,7 @@ def evaluate(
437
452
 
438
453
  else:
439
454
 
440
- x_test = apply_activation(x_test, activation_potentiation)
455
+ x_test = apply_activation(x_test, activations)
441
456
  result = x_test @ W.T
442
457
 
443
458
  max_vals = np.max(result, axis=1, keepdims=True)
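The `evaluate()` changes above are a pure parameter rename, but the forward pass they wrap is compact: the PLAN branch applies the activation stack to the inputs and multiplies by `W.T`, the bias-free MLP branch chains `layer @ W[i].T` products, and the trailing `max_vals` computation presumably feeds a numerically stable softmax. A self-contained NumPy sketch of that inference path follows; the identity activation is a stand-in, and the softmax completion is an assumption since the rest of the function is not shown in this diff.

```python
# Self-contained sketch of the bias-free forward pass wrapped by evaluate().
import numpy as np

def _apply_activation(x, acts):
    return x  # identity stand-in for the library's activation stack (assumed)

def forward_sketch(x, W, activations=['linear'], is_mlp=False):
    if is_mlp:
        layer = x
        for i in range(len(W)):                 # W is a list of layer matrices
            if i != len(W) - 1 and i != 0:
                layer = _apply_activation(layer, activations[i])
            layer = layer @ W[i].T              # no bias term
        result = layer
    else:
        result = _apply_activation(x, activations) @ W.T
    # numerically stable softmax (assumed continuation of the hunk above)
    max_vals = np.max(result, axis=1, keepdims=True)
    exp = np.exp(result - max_vals)
    return exp / np.sum(exp, axis=1, keepdims=True)

x = np.random.rand(5, 4).astype(np.float32)
W = np.random.rand(3, 4).astype(np.float32)
probs = forward_sketch(x, W)                    # shape (5, 3), rows sum to 1
```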
@@ -1,18 +1,33 @@
1
1
  # -*- coding: utf-8 -*-
2
- """
2
+ """
3
+
4
+
5
+ NEU (Neural Networks) on CUDA
6
+ =============================
7
+ This module hosts functions for training and evaluating artificial neural networks on a CUDA GPU for labeled classification tasks (for now).
8
+
9
+ Currently, two types of models can be trained:
3
10
 
4
- MAIN MODULE FOR PLAN_CUDA
11
+ PLAN (Potentiation Learning Artificial Neural Network)
12
+ MLP (Multi-Layer Perceptron → Deep Learning) -- without bias
13
+
14
+ For more information about PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
15
+
16
+ Module functions:
17
+ -----------------
18
+ - plan_fit()
19
+ - learn()
20
+ - evaluate()
5
21
 
6
22
  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
7
23
 
8
- PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
9
- PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
24
+ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
10
25
 
11
- @author: Hasan Can Beydili
12
- @YouTube: https://www.youtube.com/@HasanCanBeydili
13
- @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
14
- @Instagram: https://www.instagram.com/canbeydilj/
15
- @contact: tchasancan@gmail.com
26
+ - Author: Hasan Can Beydili
27
+ - YouTube: https://www.youtube.com/@HasanCanBeydili
28
+ - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
29
+ - Instagram: https://www.instagram.com/canbeydilj
30
+ - Contact: tchasancan@gmail.com
16
31
  """
17
32
 
18
33
  import cupy as cp
@@ -41,21 +56,21 @@ bar_format_learner = loading_bars()[1]
41
56
 
42
57
  # BUILD -----
43
58
 
44
- def fit(
59
+ def plan_fit(
45
60
  x_train,
46
61
  y_train,
47
- activation_potentiation=['linear'],
62
+ activations=['linear'],
48
63
  W=None,
49
64
  auto_normalization=False,
50
65
  dtype=cp.float32
51
66
  ):
52
67
  """
53
- Creates a model to fitting data.,
68
+ Creates a PLAN model to fit the data.
54
69
 
55
- fit Args:
70
+ plan_fit Args:
56
71
  :param (aray-like[cupy]) x_train: (aray-like[cupy]): List or cupy array of input data.
57
72
  :param (aray-like[cupy]) y_train: List or cupy array of target labels. (one hot encoded)
58
- :param (list) activation_potentiation: For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
73
+ :param (list) activations: For deeper PLAN networks, activation function parameters. For more information, run: nn.activations_list(). Default: ['linear'] (optional)
59
74
  W (cupy.ndarray, optional): If you want to re-continue or update model
60
75
  auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
61
76
  dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
@@ -69,40 +84,40 @@ def fit(
69
84
 
70
85
  weight = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
71
86
 
72
- if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
73
- elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
87
+ if auto_normalization is True: x_train = normalization(apply_activation(x_train, activations))
88
+ elif auto_normalization is False: x_train = apply_activation(x_train, activations)
74
89
  else: raise ValueError('normalization parameter only be True or False')
75
90
 
76
91
  weight += y_train.T @ x_train
77
92
 
78
93
  return normalization(weight, dtype=dtype)
79
94
 
80
-
81
- def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
95
+ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
82
96
  weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
83
97
  neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
84
98
  interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
85
- start_this_act=None, start_this_W=None, neurons=None, hidden=0, activation_functions=None, dtype=cp.float32, memory='gpu'):
99
+ start_this_act=None, start_this_W=None, neurons=[], activations=[], dtype=cp.float32, memory='gpu'):
86
100
  """
87
101
  Optimizes the activation functions for a neural network by leveraging train data to find
88
102
  the most accurate combination of activation potentiation(or activation function) & weight values for the given dataset.
89
103
 
90
104
  Why genetic optimization ENE(Eugenic NeuroEvolution) and not backpropagation?
91
105
  Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
92
- Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
106
+ Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: ENE.
93
107
 
94
108
  :Args:
95
109
  :param x_train: (array-like): Training input data.
96
110
  :param y_train: (array-like): Labels for training data.
97
- :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
111
+ :param optimizer: (function): Optimization technique with hyperparameters. (Both PLAN and MLP use ENE for optimization.) Please use this: from pyerualjetwork.ene_cuda import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your ENE hyperparameters, for example: activation_add_prob=0.85', **kwargs) Example:
98
112
  ```python
99
- optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
100
- activation_add_prob=0.05,
101
- strategy='aggressive',
102
- policy='more_selective',
103
- **kwargs)
113
+
114
+ optimizer = lambda *args, **kwargs: ene_cuda.evolver(*args,
115
+ activation_add_prob=0.05,
116
+ strategy='aggressive',
117
+ policy='more_selective',
118
+ **kwargs)
104
119
 
105
- model = plan_cuda.learner(x_train,
120
+ model = neu_cuda.learn(x_train,
106
121
  y_train,
107
122
  optimizer,
108
123
  fit_start=True,
@@ -130,30 +145,29 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
130
145
  :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
131
146
  :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
132
147
  :param start_this_W: (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
133
- :param hidden: (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
134
- :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
135
- :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
148
+ :param neurons: (list[int], optional): If you do not want to train a PLAN model, this parameter represents the neuron count of each hidden layer for MLP. Number of elements --> layer count. Default: [] (no hidden layers) --> architecture is set to PLAN; otherwise --> architecture is set to MLP.
149
+ :param activations: (list[str], optional): If you do not want to train a PLAN model, this parameter represents the activation function of each hidden layer for MLP. If neurons is not [] --> defaults to ['linear'] * len(neurons). If neurons is [] --> uses [].
136
150
  :param dtype: (cupy.dtype): Data type for the Weight matrices. np.float32 by default. Example: cp.float64 or cp.float16.
137
151
  :param memory: (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
138
152
 
139
153
  Returns:
140
- tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
154
+ tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activation functions]]. You can access these parameters via the model_operations module. For example: model_operations.get_weights() for the Weight matrix.
141
155
  """
142
156
 
143
- from .planeat_cuda import define_genomes
157
+ from .ene_cuda import define_genomes
144
158
 
145
159
  data = 'Train'
146
160
 
147
161
  except_this = ['spiral', 'circular']
148
- activation_potentiation = [item for item in all_activations() if item not in except_this]
149
- activation_potentiation_len = len(activation_potentiation)
162
+ activations = [item for item in all_activations() if item not in except_this]
163
+ activations_len = len(activations)
150
164
 
151
- if pop_size is None: pop_size = activation_potentiation_len
165
+ if pop_size is None: pop_size = activations_len
152
166
  y_train = optimize_labels(y_train, cuda=True)
153
167
 
154
- if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
168
+ if pop_size < activations_len: raise ValueError(f"pop_size must be higher or equal to {activations_len}")
155
169
 
156
- if gen is None: gen = activation_potentiation_len
170
+ if gen is None: gen = activations_len
157
171
 
158
172
  if memory == 'gpu':
159
173
  x_train = transfer_to_gpu(x_train, dtype=x_train.dtype)
@@ -173,11 +187,12 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
173
187
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
174
188
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
175
189
 
176
- if hidden is not 0 and neurons is not None and activation_functions is not None:
190
+ if neurons != []:
177
191
  fit_start = False
178
192
  weight_evolve = True
179
193
  is_mlp = True
180
- activation_potentiation = activation_functions
194
+ if activation_functions == []: activation_functions = ['linear'] * len(neurons)
195
+ activations = activation_functions
181
196
  else:
182
197
  is_mlp = False
183
198
 
@@ -193,10 +208,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
193
208
  loss_list = []
194
209
  target_pop = []
195
210
 
196
- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=79, bar_format=bar_format_learner)
211
+ progress = initialize_loading_bar(total=activations_len, desc="", ncols=79, bar_format=bar_format_learner)
197
212
 
198
- if fit_start is False or pop_size > activation_potentiation_len:
199
- weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, hidden=hidden, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)
213
+ if fit_start is False or pop_size > activations_len:
214
+ weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, activations=activations, population_size=pop_size, dtype=dtype)
200
215
 
201
216
  else:
202
217
  weight_pop = [0] * pop_size
@@ -222,18 +237,18 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
222
237
  x_train_batch = cp.array(x_train_batch, dtype=x_train_batch.dtype, copy=False)
223
238
  y_train_batch = cp.array(y_train_batch, dtype=y_train.dtype)
224
239
 
225
- if fit_start is True and i == 0 and j < activation_potentiation_len:
240
+ if fit_start is True and i == 0 and j < activations_len:
226
241
  if start_this_act is not None and j == 0:
227
242
  pass
228
243
  else:
229
- act_pop[j] = activation_potentiation[j]
230
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
244
+ act_pop[j] = activations[j]
245
+ W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
231
246
  weight_pop[j] = W
232
247
 
233
248
  if weight_evolve is False:
234
- weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
249
+ weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
235
250
 
236
- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
251
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
237
252
  acc = model[get_acc()]
238
253
 
239
254
  if loss == 'categorical_crossentropy':
@@ -291,7 +306,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
291
306
  if target_acc is not None and best_acc >= target_acc:
292
307
  progress.close()
293
308
  train_model = evaluate(x_train, y_train, W=best_weight,
294
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
309
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
295
310
  if loss == 'categorical_crossentropy':
296
311
  train_loss = categorical_crossentropy(y_true_batch=y_train,
297
312
  y_pred_batch=train_model[get_preds_softmax()])
@@ -311,7 +326,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
311
326
  if target_loss is not None and best_loss <= target_loss:
312
327
  progress.close()
313
328
  train_model = evaluate(x_train, y_train, W=best_weight,
314
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
329
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
315
330
 
316
331
  if loss == 'categorical_crossentropy':
317
332
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -363,7 +378,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
363
378
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
364
379
  progress.close()
365
380
  train_model = evaluate(x_train, y_train, W=best_weight,
366
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
381
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
367
382
 
368
383
  if loss == 'categorical_crossentropy':
369
384
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -384,7 +399,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
384
399
  # Final evaluation
385
400
  progress.close()
386
401
  train_model = evaluate(x_train, y_train, W=best_weight,
387
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
402
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
388
403
 
389
404
  if loss == 'categorical_crossentropy':
390
405
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -405,7 +420,7 @@ def evaluate(
405
420
  x_test,
406
421
  y_test,
407
422
  W,
408
- activation_potentiation=['linear'],
423
+ activations=['linear'],
409
424
  auto_normalization=False,
410
425
  is_mlp=False
411
426
  ) -> tuple:
@@ -419,7 +434,7 @@ def evaluate(
419
434
 
420
435
  W (cp.ndarray): Neural net weight matrix.
421
436
 
422
- activation_potentiation (list): Activation list. Default = ['linear'].
437
+ activations (list): Activation list. Default = ['linear'].
423
438
 
424
439
  auto_normalization (bool, optional): Normalization for x_test ? Default = False.
425
440
 
@@ -431,15 +446,15 @@ def evaluate(
431
446
 
432
447
  if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
433
448
 
434
- if isinstance(activation_potentiation, str):
435
- activation_potentiation = [activation_potentiation]
436
- elif isinstance(activation_potentiation, list):
437
- activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
449
+ if isinstance(activations, str):
450
+ activations = [activations]
451
+ elif isinstance(activations, list):
452
+ activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
438
453
 
439
454
  if is_mlp:
440
455
  layer = x_test
441
456
  for i in range(len(W)):
442
- if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_potentiation[i])
457
+ if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
443
458
 
444
459
  layer = layer @ W[i].T
445
460
 
@@ -447,7 +462,7 @@ def evaluate(
447
462
 
448
463
  else:
449
464
 
450
- x_test = apply_activation(x_test, activation_potentiation)
465
+ x_test = apply_activation(x_test, activations)
451
466
  result = x_test @ W.T
452
467
 
453
468
  max_vals = cp.max(result, axis=1, keepdims=True)
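The CUDA module mirrors the NumPy one with CuPy arrays, an `ene_cuda` optimizer, and a `memory` switch that decides whether the dataset lives in GPU or CPU RAM. A hedged usage sketch against those names closes this diff; the `from pyerualjetwork import neu_cuda, ene_cuda` import path and the toy data are assumptions inferred from the docstrings, not verified against the released wheel.

```python
# Hedged usage sketch of the CUDA counterparts (neu_cuda.learn + ene_cuda.evolver).
import cupy as cp
from pyerualjetwork import neu_cuda, ene_cuda  # assumed module layout

# Toy one-hot classification data on the GPU
x_train = cp.random.rand(256, 16).astype(cp.float32)
y_train = cp.eye(4, dtype=cp.float32)[cp.random.randint(0, 4, 256)]

optimizer = lambda *args, **kwargs: ene_cuda.evolver(*args,
                                                     activation_add_prob=0.05,
                                                     strategy='aggressive',
                                                     policy='more_selective',
                                                     **kwargs)

# memory='gpu' keeps the dataset in GPU RAM; 'cpu' keeps it in host RAM instead.
model = neu_cuda.learn(x_train, y_train, optimizer,
                       fit_start=True, gen=2, memory='gpu')
```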