pyerualjetwork 4.5__py3-none-any.whl → 4.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +51 -46
- pyerualjetwork/data_operations_cuda.py +48 -51
- pyerualjetwork/fitness_functions.py +3 -8
- pyerualjetwork/memory_operations.py +9 -23
- pyerualjetwork/model_operations.py +43 -55
- pyerualjetwork/model_operations_cuda.py +43 -53
- pyerualjetwork/plan.py +50 -72
- pyerualjetwork/plan_cuda.py +57 -85
- pyerualjetwork/planeat.py +1 -1
- pyerualjetwork/planeat_cuda.py +3 -3
- pyerualjetwork/ui.py +10 -9
- pyerualjetwork/visualizations.py +17 -17
- pyerualjetwork/visualizations_cuda.py +17 -17
- {pyerualjetwork-4.5.dist-info → pyerualjetwork-4.5.2.dist-info}/METADATA +1 -1
- pyerualjetwork-4.5.2.dist-info/RECORD +25 -0
- pyerualjetwork-4.5.dist-info/RECORD +0 -25
- {pyerualjetwork-4.5.dist-info → pyerualjetwork-4.5.2.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.5.dist-info → pyerualjetwork-4.5.2.dist-info}/top_level.txt +0 -0
pyerualjetwork/model_operations.py
CHANGED
@@ -23,33 +23,21 @@ def save_model(model_name,
 
     """
     Function to save a potentiation learning artificial neural network model.
-
-    Arguments:
-
-    model_name (str): Name of the model.
-
-    model_type (str): Type of the model. default: 'PLAN'
-
-    test_acc (float): Test accuracy of the model. default: None
-
-    weights_type (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
-
-    WeightFormat (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-
-    model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
-
-    scaler_params (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
-
-    W: Weights of the model.
-
-    activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
-
-    show_architecture (bool): It draws model architecture. True or False. Default: False
-
-    show_info (bool): Prints model details into console. default: True
+    Args:
+    model_name: (str): Name of the model.
+    W: Weights of the model.
+    scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
+    model_type: (str): Type of the model. default: 'PLAN'
+    test_acc: (float): Test accuracy of the model. default: None
+    model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
+    activation_potentiation: (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
+    weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+    weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
+    show_architecture: (bool): It draws model architecture. True or False. Default: False
+    show_info: (bool): Prints model details into console. default: True
 
     Returns:
-
+    No return.
     """
 
     from .visualizations import draw_model_architecture
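Taken together, the new `Args` block documents the 4.5.2 keyword spelling (`weights_format` replaces the old `WeightFormat`). A minimal sketch of a call that follows it; the dataset and model name are illustrative, and the bare `plan.fit(x_train, y_train)` call assumes the default signature shown later in this diff:

```python
import numpy as np
from pyerualjetwork import plan, model_operations

# Illustrative data: 100 samples, 4 features, 3 one-hot-encoded classes.
x_train = np.random.rand(100, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]

W = plan.fit(x_train, y_train)  # weight matrix, per the fit() docstring below

model_operations.save_model(model_name='example_plan',    # hypothetical name
                            W=W,
                            scaler_params=None,            # no standard scaler used
                            model_path='',                 # current directory
                            activation_potentiation=['linear'],
                            weights_type='npy',
                            weights_format='raw',          # 4.5.2 spelling
                            show_info=True)
```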
@@ -181,14 +169,14 @@ def load_model(model_name,
     """
     Function to load a potentiation learning model.
 
-
+    Args:
 
-
+    model_name (str): Name of the model.
 
-
+    model_path (str): Path where the model is saved.
 
     Returns:
-
+    lists: W(list[num]), activation_potentiation, DataFrame of the model
     """
 
     try:
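For completeness, a hedged sketch of the documented load path; per the `Returns` line above, the result bundles the weight matrix, the activation list, and a model DataFrame:

```python
from pyerualjetwork import model_operations

# Reload the model saved in the earlier sketch ('example_plan' is hypothetical).
model = model_operations.load_model(model_name='example_plan', model_path='')
```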
@@ -244,18 +232,18 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
     Function to make a prediction using a potentiation learning artificial neural network (PLAN).
     from storage
 
-
+    Args:
 
-
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
 
-
+    model_name (str): Name of the model.
 
-
+    model_path (str): Path of the model. Default: ''
 
-
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
-
+    ndarray: Output from the model.
     """
 
     from .activation_functions import apply_activation
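A sketch of a single-sample prediction from storage, matching the `Args` above; the model name carries over from the earlier save_model sketch:

```python
import numpy as np
from pyerualjetwork import model_operations

sample = np.random.rand(4).astype(np.float32)   # one input vector, 4 features
output = model_operations.predict_model_ssd(Input=sample,
                                            model_name='example_plan',
                                            model_path='',
                                            dtype=np.float32)
predicted_class = int(np.argmax(output))        # index of the strongest output
```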
@@ -285,18 +273,18 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float3
 
     """
     reverse prediction function from storage
-
+    Args:
 
-
+    output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
 
-
+    model_name (str): Name of the model.
 
-
+    model_path (str): Path of the model. Default: ''
 
-
-
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+
     Returns:
-
+    ndarray: Input from the model.
     """
 
     model = load_model(model_name, model_path)
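The reverse path runs an output-layer vector back through the stored model to reconstruct a representative input; a sketch under the same assumptions as above:

```python
import numpy as np
from pyerualjetwork import model_operations

# A hypothetical class-1 one-hot output for the 3-class model sketched earlier.
target = np.array([0.0, 1.0, 0.0], dtype=np.float32)
reconstructed = model_operations.reverse_predict_model_ssd(output=target,
                                                           model_name='example_plan',
                                                           model_path='')
```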
@@ -318,20 +306,20 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
     Function to make a prediction using a potentiation learning artificial neural network (PLAN).
     from memory.
 
-
+    Args:
 
-
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
 
-
+    W (list of ndarrays): Weights of the model.
 
-
-
-
+    scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+
+    activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
-
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
-
+    ndarray: Output from the model.
     """
 
     from .data_operations import standard_scaler
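The `_ram` variant skips storage entirely: `W` and the activation list are passed in directly, which fits right after `plan.fit`/`plan.learner`. A sketch with illustrative data:

```python
import numpy as np
from pyerualjetwork import plan, model_operations

x_train = np.random.rand(100, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]
W = plan.fit(x_train, y_train)                  # weights kept in memory

sample = np.random.rand(4).astype(np.float32)
output = model_operations.predict_model_ram(Input=sample,
                                            W=W,
                                            scaler_params=None,
                                            activation_potentiation=['linear'])
```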
@@ -358,16 +346,16 @@ def reverse_predict_model_ram(output, W, dtype=np.float32):
     """
     reverse prediction function from memory
 
-
+    Args:
 
-
+    output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
 
-
+    W (list of ndarrays): Weights of the model.
 
-
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
-
+    ndarray: Input from the model.
     """
 
     try:
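And its mirror image, also from memory; a sketch continuing the same illustrative 3-class model:

```python
import numpy as np
from pyerualjetwork import plan, model_operations

x_train = np.random.rand(100, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]
W = plan.fit(x_train, y_train)

# Hypothetical class-0 output vector for the 3-class model.
target = np.array([1.0, 0.0, 0.0], dtype=np.float32)
reconstructed = model_operations.reverse_predict_model_ram(output=target, W=W)
```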
pyerualjetwork/model_operations_cuda.py
CHANGED
@@ -23,34 +23,24 @@ def save_model(model_name,
     ):
 
     """
-    Function to save a potentiation learning artificial neural network model.
-
-    Arguments:
-
-    model_name (str): Name of the model.
-
-    model_type (str): Type of the model. default: 'PLAN'
-
-    test_acc (float): Test accuracy of the model. default: None
-
-    weights_type (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
-
-    WeightFormat (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-
-    model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
 
-    scaler_params (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
-
-    W: Weights of the model.
-
-    activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
-
-    show_architecture (bool): It draws model architecture. True or False. Default: False
+    Function to save a potentiation learning artificial neural network model.
+    Args:
+    model_name: (str): Name of the model.
+    W: Weights of the model.
+    scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
+    model_type: (str): Type of the model. default: 'PLAN'
+    test_acc: (float): Test accuracy of the model. default: None
+    model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
+    activation_potentiation: (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
+    weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+    weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
+    show_architecture: (bool): It draws model architecture. True or False. Default: False
+    show_info: (bool): Prints model details into console. default: True
 
-    show_info (bool): Prints model details into console. default: True
 
     Returns:
-
+    No return.
     """
 
     from .visualizations_cuda import draw_model_architecture
@@ -185,14 +175,14 @@ def load_model(model_name,
     """
     Function to load a potentiation learning model.
 
-
+    Args:
 
-
+    model_name (str): Name of the model.
 
-
+    model_path (str): Path where the model is saved.
 
     Returns:
-
+    lists: W(list[num]), activation_potentiation, DataFrame of the model
     """
     try:
 
@@ -250,18 +240,18 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     Function to make a prediction using a potentiation learning artificial neural network (PLAN).
     from storage
 
-
+    Args:
 
-
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
 
-
+    model_name (str): Name of the model.
 
-
+    model_path (str): Path of the model. Default: ''
 
-
+    dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-
+    cparray: Output from the model.
     """
 
     Input = cp.array(Input, dtype=dtype, copy=False)
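As these hunks show, the CUDA module mirrors the NumPy API but works on CuPy arrays. A sketch of the storage prediction path on GPU, with the same hypothetical model name as in the earlier sketches:

```python
import cupy as cp
from pyerualjetwork import model_operations_cuda

sample = cp.random.rand(4, dtype=cp.float32)     # input vector on the GPU
output = model_operations_cuda.predict_model_ssd(Input=sample,
                                                 model_name='example_plan',
                                                 model_path='',
                                                 dtype=cp.float32)
host_output = cp.asnumpy(output)                 # copy the result back to host
```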
@@ -294,18 +284,18 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float3
 
     """
     reverse prediction function from storage
-
+    Args:
 
-
+    output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
 
-
+    model_name (str): Name of the model.
 
-
+    model_path (str): Path of the model. Default: ''
 
-
+    dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-
+    cparray: Input from the model.
     """
 
     output = cp.array(output, dtype=dtype, copy=False)
@@ -328,20 +318,20 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
     Function to make a prediction using a potentiation learning artificial neural network (PLAN).
     from memory.
 
-
+    Args:
 
-
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
 
-
+    W (list of ndarrays): Weights of the model.
 
-
-
-
+    scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+
+    activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
-
+    dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-
+    cparray: Output from the model.
     """
 
     from .data_operations_cuda import standard_scaler
@@ -368,16 +358,16 @@ def reverse_predict_model_ram(output, W, dtype=cp.float32):
     """
     reverse prediction function from memory
 
-
+    Args:
 
-
+    output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
 
-
+    W (list of ndarrays): Weights of the model.
 
-
+    dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
-
+    cparray: Input from the model.
     """
 
     output = cp.array(output, dtype=dtype, copy=False)
pyerualjetwork/plan.py
CHANGED
@@ -50,8 +50,7 @@ def fit(
     """
    Creates a model to fitting data.
 
-
-
+    Args:
     x_train (aray-like[num]): List or numarray of input data.
 
     y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)
@@ -62,7 +61,7 @@ def fit(
 
     auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
 
-    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
 
     Returns:
     numpyarray: (Weight matrix).
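A sketch of `fit` as documented, including the optional overflow guard:

```python
import numpy as np
from pyerualjetwork import plan

x_train = np.random.rand(120, 8).astype(np.float32)                  # illustrative data
y_train = np.eye(4, dtype=np.float32)[np.random.randint(0, 4, 120)]  # one-hot labels

# auto_normalization=True normalizes the inputs to avoid overflow, per the docstring.
W = plan.fit(x_train, y_train, auto_normalization=True)
```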
@@ -91,80 +90,54 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
-
+
     Why genetic optimization and not backpropagation?
     Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
     Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
 
-    Args:
-
-
-
-
-
-    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
-    ```python
-    optimizer = lambda *args, **kwargs: planeat.evolver(*args,
+    :Args:
+    :param x_train: (array-like): Training input data.
+    :param y_train: (array-like): Labels for training data. one-hot encoded.
+    :param optimizer: (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    ```python
+    optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                         activation_add_prob=0.05,
                                                         strategy='aggressive',
                                                         policy='more_selective',
                                                         **kwargs)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
-
-    show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
-
-    show_history (bool, optional): If True, displays the training history after optimization. Default is False.
-
-    auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
-
-    interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
-
-    target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
-
-    start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
-
-    start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
-
-    neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
-
-    neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+    model = plan.learner(x_train,
+                         y_train,
+                         optimizer,
+                         fit_start=True,
+                         show_history=True,
+                         gen=15,
+                         batch_size=0.05,
+                         interval=16.67)
+    ```
+    :param fit_start: (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
+    :param gen: (int, optional): The generation count for genetic optimization.
+    :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents %8 of the train set. Default is 1. (%100 of train)
+    :param pop_size: (int, optional): Population size of each generation. Default: count of activation functions
+    :param weight_evolve: (bool, optional): Activation combinations already optimizes by PLANEAT genetic search algorithm. Should the weight parameters also evolve or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
+    :param neural_web_history: (bool, optional): Draws history of neural web. Default is False.
+    :param show_current_activations: (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
+    :param auto_normalization: (bool, optional): Normalization may solves overflow problem. Default: False
+    :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
+    :param early_stop: (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
+    :param show_history: (bool, optional): If True, displays the training history after optimization. Default is False.
+    :param target_loss: (float, optional): The target loss to stop training early when achieved. Default is None.
+    :param interval: (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
+    :param target_acc: (float, optional): The target accuracy to stop training early when achieved. Default is None.
+    :param loss: (str, optional): options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+    :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
+    :param loss_impact: (float, optional): Impact of loss for optimization [0-1]. Default: 0.1
+    :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+    :param start_this_W: (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
+    :param dtype: (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
 
     Returns:
     tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
-
     """
 
     from .planeat import define_genomes
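Beyond the example embedded in the docstring, the newly documented knobs (`pop_size`, `weight_evolve`, `acc_impact`/`loss_impact`) compose like this; a sketch only, with illustrative data and hyperparameter values, assuming `learner` accepts these documented parameters as keywords:

```python
import numpy as np
from pyerualjetwork import plan, planeat

x_train = np.random.rand(200, 8).astype(np.float32)
y_train = np.eye(4, dtype=np.float32)[np.random.randint(0, 4, 200)]

optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                    activation_add_prob=0.05,
                                                    **kwargs)

model = plan.learner(x_train, y_train, optimizer,
                     fit_start=True,
                     gen=10,
                     pop_size=50,          # genomes per generation
                     weight_evolve=True,   # evolve weights, not just activations
                     acc_impact=0.9,       # accuracy weighting in fitness, per the docstring
                     loss_impact=0.1)      # loss weighting in fitness
```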
@@ -238,7 +211,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
         if weight_evolve is False:
             weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
 
-        model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
+        model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization)
         acc = model[get_acc()]
 
         if loss == 'categorical_crossentropy':
@@ -295,7 +268,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weight,
-                                   activation_potentiation=final_activations)
+                                   activation_potentiation=final_activations, auto_normalization=auto_normalization)
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                       y_pred_batch=train_model[get_preds_softmax()])
@@ -315,7 +288,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weight,
-                                   activation_potentiation=final_activations)
+                                   activation_potentiation=final_activations, auto_normalization=auto_normalization)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -365,7 +338,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weight,
-                                       activation_potentiation=final_activations)
+                                       activation_potentiation=final_activations, auto_normalization=auto_normalization)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -386,7 +359,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
     # Final evaluation
     progress.close()
     train_model = evaluate(x_train, y_train, W=best_weight,
-                           activation_potentiation=final_activations)
+                           activation_potentiation=final_activations, auto_normalization=auto_normalization)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -408,7 +381,8 @@ def evaluate(
     x_test,
     y_test,
     W,
-    activation_potentiation=['linear']
+    activation_potentiation=['linear'],
+    auto_normalization=False
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.
@@ -420,12 +394,16 @@ def evaluate(
 
     W (np.ndarray): Neural net weight matrix.
 
-    activation_potentiation (list): Activation list. Default = ['linear'].
+    activation_potentiation (list, optional): Activation list. Default = ['linear'].
+
+    auto_normalization (bool, optional): Normalization for x_test ? Default = False.
 
     Returns:
     tuple: Model (list).
     """
 
+    if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
+
     x_test = apply_activation(x_test, activation_potentiation)
 
     result = x_test @ W.T
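This is the behavioral change of the release: `evaluate` can now normalize `x_test` itself, and `learner` forwards its own `auto_normalization` flag to every `evaluate` call (the four hunks above). A sketch of calling it directly; the data and stand-in weights are illustrative:

```python
import numpy as np
from pyerualjetwork import plan

x_test = np.random.rand(30, 4).astype(np.float32)
y_test = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 30)]
W = plan.fit(x_test, y_test)   # stand-in weights for the sketch

model = plan.evaluate(x_test, y_test, W=W,
                      activation_potentiation=['linear'],
                      auto_normalization=True)   # new in 4.5.2
```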