pyerualjetwork-4.4-py3-none-any.whl → pyerualjetwork-4.5.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +62 -37
- pyerualjetwork/data_operations_cuda.py +62 -44
- pyerualjetwork/fitness_functions.py +9 -9
- pyerualjetwork/memory_operations.py +9 -23
- pyerualjetwork/model_operations.py +43 -55
- pyerualjetwork/model_operations_cuda.py +43 -53
- pyerualjetwork/plan.py +50 -72
- pyerualjetwork/plan_cuda.py +57 -85
- pyerualjetwork/planeat.py +13 -23
- pyerualjetwork/planeat_cuda.py +14 -24
- pyerualjetwork/visualizations.py +17 -17
- pyerualjetwork/visualizations_cuda.py +17 -17
- {pyerualjetwork-4.4.dist-info → pyerualjetwork-4.5.1.dist-info}/METADATA +1 -1
- pyerualjetwork-4.5.1.dist-info/RECORD +25 -0
- pyerualjetwork-4.4.dist-info/RECORD +0 -25
- {pyerualjetwork-4.4.dist-info → pyerualjetwork-4.5.1.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.4.dist-info → pyerualjetwork-4.5.1.dist-info}/top_level.txt +0 -0
pyerualjetwork/model_operations_cuda.py
CHANGED
@@ -23,34 +23,24 @@ def save_model(model_name,
    ):

    """
-   Function to save a potentiation learning artificial neural network model.
-
-   Arguments:
-
-   model_name (str): Name of the model.
-
-   model_type (str): Type of the model. default: 'PLAN'
-
-   test_acc (float): Test accuracy of the model. default: None
-
-   weights_type (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
-
-   WeightFormat (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-
-   model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''

-
-
-
-
-
-
-
+   Function to save a potentiation learning artificial neural network model.
+   Args:
+   model_name: (str): Name of the model.
+   W: Weights of the model.
+   scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
+   model_type: (str): Type of the model. default: 'PLAN'
+   test_acc: (float): Test accuracy of the model. default: None
+   model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
+   activation_potentiation: (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
+   weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+   weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
+   show_architecture: (bool): It draws model architecture. True or False. Default: False
+   show_info: (bool): Prints model details into console. default: True

-   show_info (bool): Prints model details into console. default: True

    Returns:
-
+   No return.
    """

    from .visualizations_cuda import draw_model_architecture
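For orientation, a minimal usage sketch based only on the parameter names documented in the 4.5.1 docstring above; the exact signature lives in pyerualjetwork/model_operations_cuda.py and may differ.

```python
# Hypothetical sketch: argument names are taken from the 4.5.1 save_model docstring above.
import cupy as cp
from pyerualjetwork import model_operations_cuda as model_ops

W = cp.random.rand(3, 4).astype(cp.float32)   # stand-in for a trained PLAN weight matrix

model_ops.save_model(model_name='my_plan_model',
                     W=W,
                     scaler_params=None,                  # None if the standard scaler was not used
                     model_type='PLAN',
                     model_path='',                       # save into the current directory
                     activation_potentiation=['linear'],
                     weights_type='npy',
                     weights_format='raw',
                     show_architecture=False,
                     show_info=True)
```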
@@ -185,14 +175,14 @@ def load_model(model_name,
    """
    Function to load a potentiation learning model.

-
+   Args:

-
+   model_name (str): Name of the model.

-
+   model_path (str): Path where the model is saved.

    Returns:
-
+   lists: W(list[num]), activation_potentiation, DataFrame of the model
    """
    try:

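A matching load sketch, again using only the two arguments named in the updated docstring; the return value is described there as the weight list, the activation_potentiation list and a model DataFrame.

```python
# Hypothetical sketch based on the load_model docstring above.
from pyerualjetwork import model_operations_cuda as model_ops

model = model_ops.load_model(model_name='my_plan_model', model_path='')
# Per the docstring, `model` bundles W (list[num]), activation_potentiation and a
# DataFrame describing the model; unpacking details are not shown in this diff.
```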
@@ -250,18 +240,18 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
    Function to make a prediction using a potentiation learning artificial neural network (PLAN).
    from storage

-
+   Args:

-
+   Input (list or ndarray): Input data for the model (single vector or single matrix).

-
+   model_name (str): Name of the model.

-
+   model_path (str): Path of the model. Default: ''

-
+   dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

    Returns:
-
+   cparray: Output from the model.
    """

    Input = cp.array(Input, dtype=dtype, copy=False)
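The signature shown in the hunk header makes a storage-based prediction sketch straightforward; the input shape below is illustrative only.

```python
# Hypothetical sketch: predict_model_ssd signature as shown in the hunk header above.
import cupy as cp
from pyerualjetwork import model_operations_cuda as model_ops

x = cp.random.rand(64).astype(cp.float32)   # a single input vector (illustrative shape)
output = model_ops.predict_model_ssd(x,
                                     model_name='my_plan_model',
                                     model_path='',
                                     dtype=cp.float32)
```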
@@ -294,18 +284,18 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float3

    """
    reverse prediction function from storage
-
+   Args:

-
+   output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).

-
+   model_name (str): Name of the model.

-
+   model_path (str): Path of the model. Default: ''

-
+   dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

    Returns:
-
+   cparray: Input from the model.
    """

    output = cp.array(output, dtype=dtype, copy=False)
@@ -328,20 +318,20 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
    Function to make a prediction using a potentiation learning artificial neural network (PLAN).
    from memory.

-
+   Args:

-
+   Input (list or ndarray): Input data for the model (single vector or single matrix).

-
+   W (list of ndarrays): Weights of the model.

-
-
-
+   scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+
+   activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)

-
+   dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

    Returns:
-
+   cparray: Output from the model.
    """

    from .data_operations_cuda import standard_scaler
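For the in-memory variant, a sketch under the same assumptions (W stands in for a trained weight matrix; parameter names follow the docstring above).

```python
# Hypothetical sketch: predict_model_ram with a model already held in memory.
import cupy as cp
from pyerualjetwork import model_operations_cuda as model_ops

W = cp.random.rand(3, 64).astype(cp.float32)   # stand-in for a trained PLAN weight matrix
x = cp.random.rand(64).astype(cp.float32)      # a single input vector
out = model_ops.predict_model_ram(x, W,
                                  scaler_params=None,
                                  activation_potentiation=['linear'],
                                  dtype=cp.float32)
```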
@@ -368,16 +358,16 @@ def reverse_predict_model_ram(output, W, dtype=cp.float32):
    """
    reverse prediction function from memory

-
+   Args:

-
+   output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).

-
+   W (list of ndarrays): Weights of the model.

-
+   dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

    Returns:
-
+   cparray: Input from the model.
    """

    output = cp.array(output, dtype=dtype, copy=False)
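And the reverse direction, which per the docstring maps an output vector back to an input-like vector.

```python
# Hypothetical sketch: reverse prediction from memory, per the docstring above.
import cupy as cp
from pyerualjetwork import model_operations_cuda as model_ops

W = cp.random.rand(3, 64).astype(cp.float32)            # stand-in trained weight matrix
out = cp.asarray([0.1, 0.8, 0.1], dtype=cp.float32)     # a single output/probability vector
x_reconstructed = model_ops.reverse_predict_model_ram(out, W, dtype=cp.float32)
```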
pyerualjetwork/plan.py
CHANGED
@@ -50,8 +50,7 @@ def fit(
    """
    Creates a model to fitting data.

-
-
+   Args:
    x_train (aray-like[num]): List or numarray of input data.

    y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
@@ -62,7 +61,7 @@ def fit(

    auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

-   dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
+   dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.

    Returns:
    numpyarray: (Weight matrix).
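A minimal CPU-side fit sketch, using only arguments that appear in the fit docstring hunks above and in the fit() calls later in this diff (one-hot labels, optional auto_normalization and dtype).

```python
# Hypothetical sketch of plan.fit; only parameters named in this diff are used.
import numpy as np
from pyerualjetwork import plan

x_train = np.random.rand(100, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]   # one-hot encoded labels

W = plan.fit(x_train, y_train,
             auto_normalization=False,
             dtype=np.float32)      # "Returns: numpyarray: (Weight matrix)."
```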
@@ -91,80 +90,54 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
-
+
    Why genetic optimization and not backpropagation?
    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.

-   Args:
-
-
-
-
-
-   optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
-   ```python
-   optimizer = lambda *args, **kwargs: planeat.evolver(*args,
+   :Args:
+   :param x_train: (array-like): Training input data.
+   :param y_train: (array-like): Labels for training data. one-hot encoded.
+   :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+   ```python
+   optimizer = lambda *args, **kwargs: planeat.evolver(*args,
    activation_add_prob=0.05,
    strategy='aggressive',
    policy='more_selective',
    **kwargs)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-   early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
-
-   show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
-
-   show_history (bool, optional): If True, displays the training history after optimization. Default is False.
-
-   auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
-
-   interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
-
-   target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
-
-   start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
-
-   start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
-
-   neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
-
-   neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-   dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+   model = plan.learner(x_train,
+   y_train,
+   optimizer,
+   fit_start=True,
+   show_history=True,
+   gen=15,
+   batch_size=0.05,
+   interval=16.67)
+   ```
+   :param fit_start: (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
+   :param gen: (int, optional): The generation count for genetic optimization.
+   :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents %8 of the train set. Default is 1. (%100 of train)
+   :param pop_size: (int, optional): Population size of each generation. Default: count of activation functions
+   :param weight_evolve: (bool, optional): Activation combinations already optimizes by PLANEAT genetic search algorithm. Should the weight parameters also evolve or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
+   :param neural_web_history: (bool, optional): Draws history of neural web. Default is False.
+   :param show_current_activations: (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
+   :param auto_normalization: (bool, optional): Normalization may solves overflow problem. Default: False
+   :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
+   :param early_stop: (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
+   :param show_history: (bool, optional): If True, displays the training history after optimization. Default is False.
+   :param target_loss: (float, optional): The target loss to stop training early when achieved. Default is None.
+   :param interval: (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
+   :param target_acc: (float, optional): The target accuracy to stop training early when achieved. Default is None.
+   :param loss: (str, optional): options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+   :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
+   :param loss_impact: (float, optional): Impact of loss for optimization [0-1]. Default: 0.1
+   :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+   :param start_this_W: (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
+   :param dtype: (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.

    Returns:
    tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
-
    """

    from .planeat import define_genomes
@@ -238,7 +211,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if weight_evolve is False:
    weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)

-   model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
+   model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization)
    acc = model[get_acc()]

    if loss == 'categorical_crossentropy':
@@ -295,7 +268,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if target_acc is not None and best_acc >= target_acc:
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)
    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
    y_pred_batch=train_model[get_preds_softmax()])
@@ -315,7 +288,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if target_loss is not None and best_loss <= target_loss:
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)

    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -365,7 +338,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)

    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -386,7 +359,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    # Final evaluation
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)

    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -408,7 +381,8 @@ def evaluate(
    x_test,
    y_test,
    W,
-   activation_potentiation=['linear']
+   activation_potentiation=['linear'],
+   auto_normalization=False
    ) -> tuple:
    """
    Evaluates the neural network model using the given test data.
@@ -420,12 +394,16 @@ def evaluate(

    W (np.ndarray): Neural net weight matrix.

-   activation_potentiation (list): Activation list. Default = ['linear'].
+   activation_potentiation (list, optional): Activation list. Default = ['linear'].
+
+   auto_normalization (bool, optional): Normalization for x_test ? Default = False.

    Returns:
    tuple: Model (list).
    """

+   if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
+
    x_test = apply_activation(x_test, activation_potentiation)

    result = x_test @ W.T
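The functional change in this file is that evaluate() now takes auto_normalization and, when it is True, normalizes x_test internally before applying the activations. A sketch of the new call, with W standing in for a trained weight matrix:

```python
# Hypothetical sketch: plan.evaluate with the auto_normalization flag added in 4.5.1.
import numpy as np
from pyerualjetwork import plan

W = np.random.rand(3, 4).astype(np.float32)          # stand-in for a trained PLAN weight matrix
x_test = np.random.rand(20, 4).astype(np.float32)
y_test = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 20)]

model = plan.evaluate(x_test, y_test, W=W,
                      activation_potentiation=['linear'],
                      auto_normalization=True)        # 4.5.1: x_test is normalized inside evaluate()
# `model` is the list of model parameters described in the learner docstring above.
```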
pyerualjetwork/plan_cuda.py
CHANGED
@@ -51,18 +51,12 @@ def fit(
    Creates a model to fitting data.,

    fit Args:
-
-
-
-   y_train (aray-like[cupy]): List or cupy array of target labels. (one hot encoded)
-
-   activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
-
+   :param (aray-like[cupy]) x_train: (aray-like[cupy]): List or cupy array of input data.
+   :param (aray-like[cupy]) y_train: List or cupy array of target labels. (one hot encoded)
+   :param (list) activation_potentiation: For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
    W (cupy.ndarray, optional): If you want to re-continue or update model
-
    auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
-
-   dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+   dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.

    Returns:
    cupyarray: (Weight matrix).
@@ -90,82 +84,55 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset.
-
+
    Why genetic optimization and not backpropagation?
    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.

-   Args:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-   pop_size (int, optional): Population size of each generation. Default: count of activation functions
-
-   early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
-
-   show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
-
-   show_history (bool, optional): If True, displays the training history after optimization. Default is False.
-
-   auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
-
-   interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
-
-   target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
-
-   start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
-
-   start_this_W (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
-
-   neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
-
-   neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-   dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
-   memory (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
+   :Args:
+   :param x_train: (array-like): Training input data.
+   :param y_train: (array-like): Labels for training data.
+   :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+   ```python
+   optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
+   activation_add_prob=0.05,
+   strategy='aggressive',
+   policy='more_selective',
+   **kwargs)
+
+   model = plan_cuda.learner(x_train,
+   y_train,
+   optimizer,
+   fit_start=True,
+   show_history=True,
+   gen=15,
+   batch_size=0.05,
+   interval=16.67)
+   ```
+   :param fit_start: (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
+   :param gen: (int, optional): The generation count for genetic optimization.
+   :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents %8 of the train set. Default is 1. (%100 of train)
+   :param pop_size: (int, optional): Population size of each generation. Default: count of activation functions
+   :param weight_evolve: (bool, optional): Activation combinations already optimizes by PLANEAT genetic search algorithm. Should the weight parameters also evolve or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
+   :param neural_web_history: (bool, optional): Draws history of neural web. Default is False.
+   :param show_current_activations: (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
+   :param auto_normalization: (bool, optional): Normalization may solves overflow problem. Default: False
+   :param target_acc: (float, optional): The target accuracy to stop training early when achieved. Default is None.
+   :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
+   :param early_stop: (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
+   :param show_history: (bool, optional): If True, displays the training history after optimization. Default is False.
+   :param loss: (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+   :param interval: (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 33.33.
+   :param target_loss: (float, optional): The target loss to stop training early when achieved. Default is None.
+   :param loss_impact: (float, optional): Impact of loss for optimization [0-1]. Default: 0.1
+   :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
+   :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+   :param start_this_W: (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
+   :param dtype: (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16.
+   :param memory: (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.

    Returns:
    tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
-
    """

    from .planeat_cuda import define_genomes
@@ -253,7 +220,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if weight_evolve is False:
    weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)

-   model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
+   model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization)
    acc = model[get_acc()]

    if loss == 'categorical_crossentropy':
@@ -311,7 +278,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if target_acc is not None and best_acc >= target_acc:
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)
    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
    y_pred_batch=train_model[get_preds_softmax()])
@@ -331,7 +298,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if target_loss is not None and best_loss <= target_loss:
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)

    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -381,7 +348,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)

    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -402,7 +369,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
    # Final evaluation
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weight,
-   activation_potentiation=final_activations)
+   activation_potentiation=final_activations, auto_normalization=auto_normalization)

    if loss == 'categorical_crossentropy':
    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -423,7 +390,8 @@ def evaluate(
    x_test,
    y_test,
    W,
-   activation_potentiation=['linear']
+   activation_potentiation=['linear'],
+   auto_normalization=False
    ) -> tuple:
    """
    Evaluates the neural network model using the given test data.
@@ -436,11 +404,15 @@ def evaluate(
    W (cp.ndarray): Neural net weight matrix.

    activation_potentiation (list): Activation list. Default = ['linear'].
+
+   auto_normalization (bool, optional): Normalization for x_test ? Default = False.

    Returns:
    tuple: Model (list).
    """

+   if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
+
    x_test = apply_activation(x_test, activation_potentiation)

    result = x_test @ W.T
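The CUDA module mirrors the same change; the practical effect is that callers no longer need to pre-normalize x_test themselves when they pass auto_normalization=True. A sketch under the assumption that plan_cuda.evaluate keeps the signature shown above:

```python
# Hypothetical sketch for the CUDA path; signature taken from the plan_cuda.evaluate hunk above.
import cupy as cp
from pyerualjetwork import plan_cuda

W = cp.random.rand(3, 4).astype(cp.float32)          # stand-in for a trained PLAN weight matrix
x_test = cp.random.rand(20, 4).astype(cp.float32)
y_test = cp.eye(3, dtype=cp.float32)[cp.random.randint(0, 3, 20)]

# 4.4: x_test had to be normalized by the caller before evaluate().
# 4.5.1: evaluate() calls normalization() on x_test when auto_normalization=True.
model = plan_cuda.evaluate(x_test, y_test, W=W,
                           activation_potentiation=['linear'],
                           auto_normalization=True)
```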