pyerualjetwork 4.8-py3-none-any.whl → 5b0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,23 +1,40 @@
  # -*- coding: utf-8 -*-
- """
+ """
+
+
+ NEU (Neural Networks)
+ =====================
+ This module hosts functions for training and evaluating artificial neural networks for labeled classification tasks (for now).
+
+ Currently, two types of models can be trained:

- MAIN MODULE FOR PLAN
+ PLAN (Potentiation Learning Artificial Neural Network)
+ MLP (Multi-Layer Perceptron → Deep Learning) -- With non-bias
+
+ For more information about PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+
+ Module functions:
+ -----------------
+ - plan_fit()
+ - learn()
+ - evaluate()

  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes

- PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
- PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf

- @author: Hasan Can Beydili
- @YouTube: https://www.youtube.com/@HasanCanBeydili
- @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
- @Instagram: https://www.instagram.com/canbeydilj/
- @contact: tchasancan@gmail.com
+ - Author: Hasan Can Beydili
+ - YouTube: https://www.youtube.com/@HasanCanBeydili
+ - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+ - Instagram: https://www.instagram.com/canbeydilj
+ - Contact: tchasancan@gmail.com
  """

  import numpy as np
  import copy

+ import neu
+
  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
  from .data_operations import normalization, batcher
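The docstring above renames the module's public surface: `fit` becomes `plan_fit`, `learner` becomes `learn`, and the `activation_potentiation` keyword becomes `activations`. A minimal migration sketch against the 5b0 names, assuming the module is imported as `pyerualjetwork.neu` (the path is inferred from the RECORD listing at the end of this diff, not stated in the hunks themselves):

```python
import numpy as np
from pyerualjetwork import neu  # 4.8 code imported `plan` instead

# Toy data: 100 samples, 4 features, 3 one-hot classes (illustrative only).
x_train = np.random.rand(100, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]

# 4.8: W = plan.fit(x_train, y_train, activation_potentiation=['linear'])
W = neu.plan_fit(x_train, y_train, activations=['linear'])

# 4.8: plan.evaluate(..., activation_potentiation=[...])
model = neu.evaluate(x_train, y_train, W=W, activations=['linear'])
```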
@@ -40,23 +57,23 @@ bar_format_learner = loading_bars()[1]

  # BUILD -----

- def fit(
+ def plan_fit(
  x_train,
  y_train,
- activation_potentiation=['linear'],
+ activations=['linear'],
  W=None,
  auto_normalization=False,
  dtype=np.float32
  ):
  """
- Creates a model to fitting data.
+ Creates a PLAN model to fitting data.

  Args:
  x_train (aray-like[num]): List or numarray of input data.

  y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)

- activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
+ activations (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)

  W (numpy.ndarray): If you want to re-continue or update model

@@ -74,8 +91,8 @@ def fit(

  weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

- if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
- elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+ if auto_normalization is True: x_train = normalization(apply_activation(x_train, activations))
+ elif auto_normalization is False: x_train = apply_activation(x_train, activations)
  else: raise ValueError('normalization parameter only be True or False')

  weight += y_train.T @ x_train
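The hunk above shows that `plan_fit` is a single closed-form pass rather than an iterative optimizer: the (optionally normalized and activated) inputs are multiplied against the one-hot labels and accumulated into the weight matrix, which is then normalized. A self-contained NumPy sketch of that rule, standing in for the library's `apply_activation`/`normalization` helpers (illustrative only, not the package code):

```python
import numpy as np

def plan_fit_sketch(x_train, y_train, W=None, dtype=np.float32):
    """Re-statement of the update shown above: weight += y_train.T @ x_train, then scale."""
    x = x_train.reshape(len(x_train), -1).astype(dtype)             # flatten each sample
    if W is None:
        W = np.zeros((y_train.shape[1], x.shape[1]), dtype=dtype)   # (classes, features)
    W = W + y_train.astype(dtype).T @ x                             # accumulate label-feature products
    scale = np.max(np.abs(W))
    return W if scale == 0 else W / scale                           # stand-in for the library's normalization()
```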
@@ -83,11 +100,11 @@ def fit(
  return normalization(weight, dtype=dtype)


- def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
+ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
  weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
  neurons_history=False, early_stop=False, show_history=False, target_loss=None,
  interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
- start_this_act=None, start_this_W=None, neurons=None, hidden=0, activation_functions=None, dtype=np.float32):
+ start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], dtype=np.float32):
  """
  Optimizes the activation functions for a neural network by leveraging train data to find
  the most accurate combination of activation potentiation(or activation function) & weight values for the given dataset.
@@ -99,15 +116,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :Args:
  :param x_train: (array-like): Training input data.
  :param y_train: (array-like): Labels for training data. one-hot encoded.
- :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+ :param optimizer: (function): Optimization technique with hyperparameters. (PLAN and MLP (both) using ENE for optimization.) Please use this: from pyerualjetwork.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
  ```python
- optimizer = lambda *args, **kwargs: planeat.evolver(*args,
+ optimizer = lambda *args, **kwargs: ene.evolver(*args,
  activation_add_prob=0.05,
  strategy='aggressive',
  policy='more_selective',
  **kwargs)

- model = plan.learner(x_train,
+ model = neu.learn(x_train,
  y_train,
  optimizer,
  fit_start=True,
@@ -135,40 +152,40 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :param loss_impact: (float, optional): Impact of loss for optimization [0-1]. Default: 0.1
  :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
  :param start_this_W: (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
- :param hidden: (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
- :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
- :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
+ :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Number of elements --> Layer count. Default: [] (No hidden layer) --> architecture setted to PLAN, if not --> architecture setted to MLP.
+ :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
  :param dtype: (numpy.dtype): Data type for the Weight matrices. np.float32 by default. Example: np.float64 or np.float16.

  Returns:
- tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
+ tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
  """

- from .planeat import define_genomes
+ from .ene import define_genomes

  data = 'Train'

  except_this = ['spiral', 'circular']
- activation_potentiation = [item for item in all_activations() if item not in except_this]
- activation_potentiation_len = len(activation_potentiation)
+ activations = [item for item in all_activations() if item not in except_this]
+ activations_len = len(activations)

  # Pre-checks

- if pop_size is None: pop_size = activation_potentiation_len
+ if pop_size is None: pop_size = activations_len
  y_train = optimize_labels(y_train, cuda=False)

- if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+ if pop_size < activations_len: raise ValueError(f"pop_size must be higher or equal to {activations_len}")

- if gen is None: gen = activation_potentiation_len
+ if gen is None: gen = activations_len

  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

- if hidden is not 0 and neurons is not None and activation_functions is not None:
+ if neurons is not []:
  fit_start = False
  weight_evolve = True
  is_mlp = True
- activation_potentiation = activation_functions
+ if activation_functions == []: activation_functions = ['linear'] * len(neurons)
+ activations = activation_functions
  else:
  is_mlp = False

@@ -184,10 +201,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  loss_list = []
  target_pop = []

- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=77, bar_format=bar_format_learner)
+ progress = initialize_loading_bar(total=activations_len, desc="", ncols=77, bar_format=bar_format_learner)

- if fit_start is False or pop_size > activation_potentiation_len:
- weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, hidden=hidden, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)
+ if fit_start is False or pop_size > activations_len:
+ weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)

  else:
  weight_pop = [0] * pop_size
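Per the reworked parameter docs above, 5b0 drops the separate `hidden` count (note the `define_genomes` call no longer passes it): leaving `neurons` empty keeps the PLAN architecture, while a non-empty list switches `learn()` to an MLP with one hidden layer per entry and `['linear'] * len(neurons)` as the default activations. A hedged sketch of the two call shapes, with the `evolver` import path taken from the docstring and everything else (data, generation count) assumed:

```python
import numpy as np
from pyerualjetwork import neu
from pyerualjetwork.ene import evolver  # optimizer backend named in the docstring above

x_train = np.random.rand(200, 8).astype(np.float32)
y_train = np.eye(4, dtype=np.float32)[np.random.randint(0, 4, 200)]
optimizer = lambda *args, **kwargs: evolver(*args, policy='more_selective', **kwargs)

# PLAN (default): neurons=[] means no hidden layers; the activations themselves get optimized.
plan_model = neu.learn(x_train, y_train, optimizer, fit_start=True, gen=5)

# MLP: two hidden layers (32 and 16 neurons). Omitting activation_functions falls back to
# ['linear', 'linear'] per the docstring; fit_start/weight_evolve are overridden internally.
mlp_model = neu.learn(x_train, y_train, optimizer, gen=5, neurons=[32, 16])
```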
@@ -210,18 +227,18 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,

  x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)

- if fit_start is True and i == 0 and j < activation_potentiation_len:
+ if fit_start is True and i == 0 and j < activations_len:
  if start_this_act is not None and j == 0:
  pass
  else:
- act_pop[j] = activation_potentiation[j]
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+ act_pop[j] = activations[j]
+ W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W

  if weight_evolve is False:
- weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+ weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)

- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
  acc = model[get_acc()]

  if loss == 'categorical_crossentropy':
@@ -278,7 +295,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and best_acc >= target_acc:
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
  y_pred_batch=train_model[get_preds_softmax()])
@@ -298,7 +315,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_loss is not None and best_loss <= target_loss:
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -351,7 +368,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -373,7 +390,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  progress.close()

  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -395,7 +412,7 @@ def evaluate(
  x_test,
  y_test,
  W,
- activation_potentiation=['linear'],
+ activations=['linear'],
  auto_normalization=False,
  is_mlp=False
  ) -> tuple:
@@ -409,7 +426,7 @@ def evaluate(

  W (np.ndarray): Neural net weight matrix.

- activation_potentiation (list, optional): Activation list. Default = ['linear'].
+ activations (list, optional): Activation list. Default = ['linear'].

  auto_normalization (bool, optional): Normalization for x_test ? Default = False.

@@ -421,15 +438,15 @@ def evaluate(

  if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)

- if isinstance(activation_potentiation, str):
- activation_potentiation = [activation_potentiation]
- elif isinstance(activation_potentiation, list):
- activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
+ if isinstance(activations, str):
+ activations = [activations]
+ elif isinstance(activations, list):
+ activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]

  if is_mlp:
  layer = x_test
  for i in range(len(W)):
- if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_potentiation[i])
+ if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])

  layer = layer @ W[i].T

@@ -437,7 +454,7 @@ def evaluate(

  else:

- x_test = apply_activation(x_test, activation_potentiation)
+ x_test = apply_activation(x_test, activations)
  result = x_test @ W.T

  max_vals = np.max(result, axis=1, keepdims=True)
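For the non-MLP branch shown above, evaluation with the default 'linear' activation reduces to a matrix product followed by a numerically stabilized softmax (the `max_vals` subtraction begun on the last line is the usual stability trick). A standalone NumPy sketch of that scoring step, separate from the library:

```python
import numpy as np

def plan_scores_sketch(x_test, W):
    """Forward pass for the single-layer branch: project onto W, then softmax per row."""
    result = x_test @ W.T                                  # (samples, classes) raw scores
    max_vals = np.max(result, axis=1, keepdims=True)       # subtract the row max for stability
    exp_scores = np.exp(result - max_vals)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    preds = np.argmax(probs, axis=1)                       # hard class predictions
    return probs, preds
```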
@@ -1,18 +1,33 @@
  # -*- coding: utf-8 -*-
- """
+ """
+
+
+ NEU (Neural Networks) on CUDA
+ =============================
+ This module hosts functions for training and evaluating artificial neural networks on CUDA GPU for labeled classification tasks (for now).
+
+ Currently, two types of models can be trained:

- MAIN MODULE FOR PLAN_CUDA
+ PLAN (Potentiation Learning Artificial Neural Network)
+ MLP (Multi-Layer Perceptron → Deep Learning) -- With non-bias
+
+ For more information about PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+
+ Module functions:
+ -----------------
+ - plan_fit()
+ - learn()
+ - evaluate()

  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes

- PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
- PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf

- @author: Hasan Can Beydili
- @YouTube: https://www.youtube.com/@HasanCanBeydili
- @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
- @Instagram: https://www.instagram.com/canbeydilj/
- @contact: tchasancan@gmail.com
+ - Author: Hasan Can Beydili
+ - YouTube: https://www.youtube.com/@HasanCanBeydili
+ - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+ - Instagram: https://www.instagram.com/canbeydilj
+ - Contact: tchasancan@gmail.com
  """

  import cupy as cp
@@ -41,21 +56,21 @@ bar_format_learner = loading_bars()[1]

  # BUILD -----

- def fit(
+ def plan_fit(
  x_train,
  y_train,
- activation_potentiation=['linear'],
+ activation_functions=['linear'],
  W=None,
  auto_normalization=False,
  dtype=cp.float32
  ):
  """
- Creates a model to fitting data.,
+ Creates a PLAN model to fitting data.,

- fit Args:
+ plan_fit Args:
  :param (aray-like[cupy]) x_train: (aray-like[cupy]): List or cupy array of input data.
  :param (aray-like[cupy]) y_train: List or cupy array of target labels. (one hot encoded)
- :param (list) activation_potentiation: For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
+ :param (list) activation_functions: For deeper PLAN networks, activation function parameters. For more information please run this code: nn.activations_list() default: [None] (optional)
  W (cupy.ndarray, optional): If you want to re-continue or update model
  auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
  dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
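One API wrinkle worth flagging: in this CUDA counterpart module (neu_cuda.py per the RECORD listing below) the renamed `plan_fit`/`evaluate` take `activation_functions=`, whereas the CPU module above takes `activations=`. A small hedged sketch, assuming the module is importable as `pyerualjetwork.neu_cuda`:

```python
import cupy as cp
from pyerualjetwork import neu_cuda  # import path inferred from the RECORD listing

x_gpu = cp.random.rand(100, 4, dtype=cp.float32)
y_gpu = cp.eye(3, dtype=cp.float32)[cp.random.randint(0, 3, 100)]  # one-hot labels on the GPU

# Note the keyword: `activation_functions=` here, `activations=` in the CPU module.
W = neu_cuda.plan_fit(x_gpu, y_gpu, activation_functions=['linear'])
model = neu_cuda.evaluate(x_gpu, y_gpu, W=W, activation_functions=['linear'])
```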
@@ -69,40 +84,40 @@ def fit(

  weight = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

- if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
- elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+ if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_functions))
+ elif auto_normalization is False: x_train = apply_activation(x_train, activation_functions)
  else: raise ValueError('normalization parameter only be True or False')

  weight += y_train.T @ x_train

  return normalization(weight, dtype=dtype)

-
- def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
+ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
  weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
  neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
  interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
- start_this_act=None, start_this_W=None, neurons=None, hidden=0, activation_functions=None, dtype=cp.float32, memory='gpu'):
+ start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], dtype=cp.float32, memory='gpu'):
  """
  Optimizes the activation functions for a neural network by leveraging train data to find
  the most accurate combination of activation potentiation(or activation function) & weight values for the given dataset.

  Why genetic optimization ENE(Eugenic NeuroEvolution) and not backpropagation?
  Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
- Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+ Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: ENE.

  :Args:
  :param x_train: (array-like): Training input data.
  :param y_train: (array-like): Labels for training data.
- :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+ :param optimizer: (function): Optimization technique with hyperparameters. (PLAN and MLP (both) using ENE for optimization.) Please use this: from pyerualjetwork.ene_cuda import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
  ```python
- optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
- activation_add_prob=0.05,
- strategy='aggressive',
- policy='more_selective',
- **kwargs)
+
+ optimizer = lambda *args, **kwargs: ene_cuda.evolver(*args,
+ activation_add_prob=0.05,
+ strategy='aggressive',
+ policy='more_selective',
+ **kwargs)

- model = plan_cuda.learner(x_train,
+ model = neu_cuda.learn(x_train,
  y_train,
  optimizer,
  fit_start=True,
@@ -130,30 +145,29 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
  :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
  :param start_this_W: (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
- :param hidden: (int, optional): If you dont want train PLAN model this parameter represents a hidden layer count for MLP model. Default: 0 (PLAN)
- :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Default: None (PLAN)
- :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. Default: None (PLAN) NOTE: THIS EFFECTS HIDDEN LAYERS OUTPUT. NOT OUTPUT LAYER!
+ :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Number of elements --> Layer count. Default: [] (No hidden layer) --> architecture setted to PLAN, if not --> architecture setted to MLP.
+ :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
  :param dtype: (cupy.dtype): Data type for the Weight matrices. np.float32 by default. Example: cp.float64 or cp.float16.
  :param memory: (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.

  Returns:
- tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
+ tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
  """

- from .planeat_cuda import define_genomes
+ from .ene_cuda import define_genomes

  data = 'Train'

  except_this = ['spiral', 'circular']
- activation_potentiation = [item for item in all_activations() if item not in except_this]
- activation_potentiation_len = len(activation_potentiation)
+ activation_functions = [item for item in all_activations() if item not in except_this]
+ activation_functions_len = len(activation_functions)

- if pop_size is None: pop_size = activation_potentiation_len
+ if pop_size is None: pop_size = activation_functions_len
  y_train = optimize_labels(y_train, cuda=True)

- if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+ if pop_size < activation_functions_len: raise ValueError(f"pop_size must be higher or equal to {activation_functions_len}")

- if gen is None: gen = activation_potentiation_len
+ if gen is None: gen = activation_functions_len

  if memory == 'gpu':
  x_train = transfer_to_gpu(x_train, dtype=x_train.dtype)
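The `memory` parameter documented above chooses whether the whole training set is moved to GPU RAM once (`'gpu'`) or kept in host RAM and transferred piecemeal (`'cpu'`). A rough illustration of that trade-off in plain CuPy, without reproducing the library's own `transfer_to_gpu` helper:

```python
import numpy as np
import cupy as cp

x_host = np.random.rand(100_000, 64).astype(np.float32)  # dataset resident in CPU RAM

# memory='gpu' style: one up-front transfer, every later batch is already on the device.
x_device = cp.asarray(x_host)

# memory='cpu' style: keep the array on the host and move one batch at a time,
# trading repeated transfers for a much smaller GPU memory footprint.
first_batch = cp.asarray(x_host[:1024])
```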
@@ -173,11 +187,12 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

- if hidden is not 0 and neurons is not None and activation_functions is not None:
+ if neurons is not []:
  fit_start = False
  weight_evolve = True
  is_mlp = True
- activation_potentiation = activation_functions
+ if activation_functions == []: activation_functions = ['linear'] * len(neurons)
+ activations = activation_functions
  else:
  is_mlp = False

@@ -193,10 +208,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  loss_list = []
  target_pop = []

- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=79, bar_format=bar_format_learner)
+ progress = initialize_loading_bar(total=activation_functions_len, desc="", ncols=79, bar_format=bar_format_learner)

- if fit_start is False or pop_size > activation_potentiation_len:
- weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, hidden=hidden, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)
+ if fit_start is False or pop_size > activation_functions_len:
+ weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)

  else:
  weight_pop = [0] * pop_size
@@ -222,18 +237,18 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  x_train_batch = cp.array(x_train_batch, dtype=x_train_batch.dtype, copy=False)
  y_train_batch = cp.array(y_train_batch, dtype=y_train.dtype)

- if fit_start is True and i == 0 and j < activation_potentiation_len:
+ if fit_start is True and i == 0 and j < activation_functions_len:
  if start_this_act is not None and j == 0:
  pass
  else:
- act_pop[j] = activation_potentiation[j]
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+ act_pop[j] = activation_functions[j]
+ W = plan_fit(x_train_batch, y_train_batch, activation_functions=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W

  if weight_evolve is False:
- weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+ weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activation_functions=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)

- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_functions=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
  acc = model[get_acc()]

  if loss == 'categorical_crossentropy':
@@ -291,7 +306,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and best_acc >= target_acc:
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activation_functions=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
  y_pred_batch=train_model[get_preds_softmax()])
@@ -311,7 +326,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_loss is not None and best_loss <= target_loss:
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activation_functions=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -363,7 +378,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activation_functions=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -384,7 +399,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  # Final evaluation
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
- activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
+ activation_functions=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
  train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -405,7 +420,7 @@ def evaluate(
  x_test,
  y_test,
  W,
- activation_potentiation=['linear'],
+ activation_functions=['linear'],
  auto_normalization=False,
  is_mlp=False
  ) -> tuple:
@@ -419,7 +434,7 @@ def evaluate(

  W (cp.ndarray): Neural net weight matrix.

- activation_potentiation (list): Activation list. Default = ['linear'].
+ activation_functions (list): Activation list. Default = ['linear'].

  auto_normalization (bool, optional): Normalization for x_test ? Default = False.

@@ -431,15 +446,15 @@ def evaluate(

  if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)

- if isinstance(activation_potentiation, str):
- activation_potentiation = [activation_potentiation]
- elif isinstance(activation_potentiation, list):
- activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
+ if isinstance(activation_functions, str):
+ activation_functions = [activation_functions]
+ elif isinstance(activation_functions, list):
+ activation_functions = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_functions]

  if is_mlp:
  layer = x_test
  for i in range(len(W)):
- if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_potentiation[i])
+ if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_functions[i])

  layer = layer @ W[i].T

@@ -447,7 +462,7 @@ def evaluate(

  else:

- x_test = apply_activation(x_test, activation_potentiation)
+ x_test = apply_activation(x_test, activation_functions)
  result = x_test @ W.T

  max_vals = cp.max(result, axis=1, keepdims=True)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.8
+ Version: 5b0
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -0,0 +1,25 @@
+ pyerualjetwork/__init__.py,sha256=jec86Uk9HsivIT2ZJGGRxAwLDdKKr074d_UyYmhtTGY,2631
+ pyerualjetwork/activation_functions.py,sha256=X7Kv8qv8oZq8hvTdUiV-GkFjKHRlKIQypRPXh6gdkm4,7614
+ pyerualjetwork/activation_functions_cuda.py,sha256=pefklsl9QuSVbKwiUUHeF_ExN0bICH7QIF1MfoMU40Q,7665
+ pyerualjetwork/data_operations.py,sha256=LKmLfl43zSCCuP2cWkBM-D6GtlhxXQggsNvZNUwHDe4,16347
+ pyerualjetwork/data_operations_cuda.py,sha256=7p_v0yabHwq5Ft0jxWGEq1ZKyBFxOxvZvtPQjkcligk,18525
+ pyerualjetwork/ene.py,sha256=Dnx96FmQHxCbeT0IcUOydG7R8lPTlzmU_xXU9P4VBPY,45117
+ pyerualjetwork/ene_cuda.py,sha256=KgmpfgUH9KK5hDy6qqk3P9CX7N1dsFTTVIbFcASOLrM,45648
+ pyerualjetwork/fitness_functions.py,sha256=urRdeMvUhNgWxD4ZGHCRdQlIf9cTWYMvF3_aVBojRqY,1235
+ pyerualjetwork/help.py,sha256=F1xDDKqHGeUIXM-mo5c0Eav5XidCVNc62LvXqwS2Zbs,785
+ pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+ pyerualjetwork/memory_operations.py,sha256=Ch2QydBGHR5Be6fZu59C8eF8z-C2c3HVDIH8fz07BZo,14258
+ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+ pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
+ pyerualjetwork/model_operations.py,sha256=F5qYAU578yrS1cUCwsaPJnqgfEAEUD_50vspakypTGY,15971
+ pyerualjetwork/model_operations_cuda.py,sha256=lgYAEGUERTMU7TEWRVKaa3yk_IOt4Jo9RJP9lDsNREU,17206
+ pyerualjetwork/neu.py,sha256=5CwWKZ-tix4M4_Gf0PfmnMLJttHdKXsB1GZ6_FvfB04,24966
+ pyerualjetwork/neu_cuda.py,sha256=3NnpiQPwqcbbkQSLZNyCSZLJdb5DS_Q0_fEAzPYkEYU,26327
+ pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
+ pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
+ pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
+ pyerualjetwork-5b0.dist-info/METADATA,sha256=Wjw5A82pD-qSNvLGtVFTHHs31hfxQW6jMZKNBUDbzQc,7503
+ pyerualjetwork-5b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-5b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-5b0.dist-info/RECORD,,