pyerualjetwork 4.1.7__tar.gz → 4.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/PKG-INFO +1 -1
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/__init__.py +2 -13
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/memory_operations.py +1 -1
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/plan.py +104 -136
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/plan_cuda.py +107 -118
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/planeat.py +26 -17
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/planeat_cuda.py +26 -18
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/visualizations.py +6 -4
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/visualizations_cuda.py +5 -2
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork.egg-info/PKG-INFO +1 -1
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/setup.py +1 -1
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/README.md +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/activation_functions.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/activation_functions_cuda.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/data_operations.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/data_operations_cuda.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/help.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/loss_functions.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/loss_functions_cuda.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/metrics.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/metrics_cuda.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/model_operations.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/model_operations_cuda.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/ui.py +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/setup.cfg +0 -0
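
The headline change in 4.1.8 is `plan.learner`: it gains a required `optimizer` argument plus optional `gen`, `target_fitness`, `start_this_act`, and `start_this_W` parameters, while the old adaptive strategies (`patience`, `early_shifting`, `adaptive_accuracy`/`adaptive_loss`) are removed in favor of PLANEAT genetic optimization. A minimal sketch of the new call shape, adapted from the docstring example in the plan.py diff below; the toy data and hyperparameter values here are illustrative, not prescriptive:

```python
import numpy as np
from pyerualjetwork import plan, planeat

# Toy data: learner expects one-hot encoded labels.
x_train = np.random.rand(16, 8).astype(np.float32)
y_train = np.eye(2, dtype=np.uint8)[np.random.randint(0, 2, 16)]

# learner() calls the optimizer once per generation; the lambda binds extra
# PLANEAT hyperparameters while forwarding learner's own positional arguments.
genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
                                                           activation_add_prob=0.85,
                                                           **kwargs)

model = plan.learner(x_train,
                     y_train,
                     optimizer=genetic_optimizer,
                     strategy='accuracy',   # or 'f1', 'precision', 'recall'
                     target_acc=0.94,
                     interval=16.67)        # ~60 FPS progress refresh
```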
{pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.7
+Version: 4.1.8
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/__init__.py

@@ -48,7 +48,7 @@ for package_name in package_names:

 print(f"PyerualJetwork is ready to use with {err} errors")

-__version__ = "4.1.7"
+__version__ = "4.1.8"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

 def print_version(__version__):
@@ -58,15 +58,4 @@ def print_update_notes(__update__):
     print(f"Update Notes:\n{__update__}")

 print_version(__version__)
-print_update_notes(__update__)
-
-from .plan import *
-from .planeat import *
-from .activation_functions import *
-from .data_operations import *
-from .loss_functions import *
-from .metrics import *
-from .model_operations import *
-from .ui import *
-from .visualizations import *
-from .help import *
+print_update_notes(__update__)
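
Because the wildcard re-exports above are gone, importing the package no longer pulls every submodule's names into the top level. A presumed migration, inferred from the removed lines (not an officially documented recipe):

```python
# 4.1.7: `from pyerualjetwork import *` exposed fit, evolve, etc. as bare names.
# 4.1.8: import the submodules you need explicitly.
from pyerualjetwork import plan, planeat, model_operations

# e.g. W = plan.fit(x_train, y_train) instead of a bare fit(x_train, y_train)
```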
{pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/memory_operations.py

@@ -40,7 +40,7 @@ def transfer_to_cpu(x, dtype=np.float32):

     :return: NumPy array with the specified dtype
     """
-    from
+    from ui import loading_bars, initialize_loading_bar
     try:
         if isinstance(x, np.ndarray):
             return x.astype(dtype) if x.dtype != dtype else x
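
Note that the replacement line imports `ui` without the package-relative dot (`from ui ...` rather than `from .ui ...`), so it only resolves when the package directory itself is on `sys.path`. For reference, a hedged usage sketch of `transfer_to_cpu`, based only on the signature and return description visible in this hunk; the CuPy input is an assumption about the intended use:

```python
import numpy as np
import cupy as cp  # assumed GPU-array source; the hunk only shows the NumPy branch
from pyerualjetwork.memory_operations import transfer_to_cpu

gpu_array = cp.random.rand(256, 256)
cpu_array = transfer_to_cpu(gpu_array, dtype=np.float32)
assert isinstance(cpu_array, np.ndarray) and cpu_array.dtype == np.float32
```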
{pyerualjetwork-4.1.7 → pyerualjetwork-4.1.8}/pyerualjetwork/plan.py

@@ -3,8 +3,8 @@

 MAIN MODULE FOR PLAN

-PLAN document: https://github.com/HCB06/
-
+PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf

 @author: Hasan Can Beydili
 @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -24,6 +24,7 @@ from .loss_functions import binary_crossentropy, categorical_crossentropy
 from .activation_functions import apply_activation, Softmax, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
+from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -102,8 +103,6 @@ def fit(
     Returns:
         numpyarray([num]): (Weight matrix).
     """
-
-    from model_operations import get_acc

     # Pre-checks

@@ -173,45 +172,61 @@ def fit(
     return normalization(LTPW, dtype=dtype)


-def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', batch_size=1,
+def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
-            neurons_history=False,
-            early_stop=False, loss='categorical_crossentropy', show_history=False,
+            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None,
+            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
-    the most accurate combination of activation potentiation for the given dataset.
+    the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.

+    Why genetic optimization and not backpropagation?
+    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
+    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+
     Args:

         x_train (array-like): Training input data.

         y_train (array-like): Labels for training data. one-hot encoded.

+        optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+        ```python
+        genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
+                                                                   activation_add_prob=0.85,
+                                                                   mutations=False,
+                                                                   strategy='cross_over',
+                                                                   **kwargs)
+
+        model = plan.learner(x_train,
+                             y_train,
+                             optimizer=genetic_optimizer,
+                             strategy='accuracy',
+                             show_history=True,
+                             target_acc=0.94,
+                             interval=16.67)
+        ```
+
         x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active

         y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active

-        strategy (str, optional): Learning strategy. (options: 'accuracy', '
-
-        patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
+        strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.

-
+        gen (int, optional): The generation count for genetic optimization.

         batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)

         auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.

-
-
-        early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two depth stops learning.) Default is False.
+        early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.

         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False

         show_history (bool, optional): If True, displays the training history after optimization. Default is False.

-        loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training
+        loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

         interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.

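
The `batch_size` fraction documented above selects a random slice of the test set for each evaluation. The real `batcher` comes from pyerualjetwork's data utilities; this standalone sketch only illustrates the documented semantics and is an assumption about its behavior, not its implementation:

```python
import numpy as np

def batcher_sketch(x_test, y_test, batch_size=1.0):
    """Illustration: batch_size=0.08 -> each batch covers ~8% of the test set."""
    n = len(x_test)
    k = max(1, int(n * batch_size))
    idx = np.random.choice(n, size=k, replace=False)  # fresh random partition per call
    return x_test[idx], y_test[idx]
```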
@@ -223,81 +238,54 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat

         only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())

-
+        start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+
+        start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None

         neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.

         neural_web_history (bool, optional): Draws history of neural web. Default is False.

+        target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
+
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

     Returns:
         tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].

     """
-
+
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)

     activation_potentiation = all_activations()

     # Pre-checks

     x_train = x_train.astype(dtype, copy=False)
-
-    if len(y_train[0]) < 256:
-        if y_train.dtype != np.uint8:
-            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
-    elif len(y_train[0]) <= 32767:
-        if y_train.dtype != np.uint16:
-            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_train.dtype != np.uint32:
-            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
-
-    if x_test is not None:
-        x_test = x_test.astype(dtype, copy=False)
-
-        if len(y_test[0]) < 256:
-            if y_test.dtype != np.uint8:
-                y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-        elif len(y_test[0]) <= 32767:
-            if y_test.dtype != np.uint16:
-                y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-        else:
-            if y_test.dtype != np.uint32:
-                y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
+    y_train = optimize_labels(y_train, cuda=False)

     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
+        x_test = x_test.astype(dtype, copy=False)
+        y_test = optimize_labels(y_test, cuda=False)
         data = 'Test'

-    if early_shifting != False:
-        shift_patience = early_shifting
-
-    # Strategy initialization
-    if strategy == 'adaptive_accuracy':
-        strategy = 'accuracy'
-        adaptive = True
-        if patience == None:
-            patience = 5
-    elif strategy == 'adaptive_loss':
-        strategy = 'loss'
-        adaptive = True
-        if patience == None:
-            patience = 0.150
-    else:
-        adaptive = False
-
     # Filter activation functions
-    if only_this
+    if only_this is not None:
         activation_potentiation = only_this
-    if except_this
+    if except_this is not None:
         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-    if
-
+    if gen is None:
+        gen = len(activation_potentiation)
+
+    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")

+    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

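
The new `optimize_labels` call consolidates the unsigned-integer downcasting that 4.1.7 inlined separately for `y_train` and `y_test` (the removed block above). A sketch of the equivalent logic, reconstructed from those removed lines; the real helper lives in `memory_operations` and additionally handles the GPU path via its `cuda` flag:

```python
import numpy as np

def optimize_labels_sketch(y):
    """Pick the smallest unsigned dtype that fits the one-hot label width."""
    n_classes = len(y[0])
    if n_classes < 256:
        target = np.uint8
    elif n_classes <= 32767:
        target = np.uint16
    else:
        target = np.uint32
    return y if y.dtype == target else np.array(y, copy=False).astype(target, copy=False)
```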
@@ -306,19 +294,20 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         ncols = 76
     else:
         ncols = 89
-    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)

     # Initialize variables
-
-
-
+    act_pop = []
+    weight_pop = []
+
+    if start_this_act is None and start_this_W is None:
         best_acc = 0
     else:
-
+        act_pop.append(start_this_act)
+        weight_pop.append(start_this_W)
+
         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)

-
-        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)
+        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)

         if loss == 'categorical_crossentropy':
             test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
@@ -328,12 +317,15 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         best_acc = model[get_acc()]
         best_loss = test_loss

-
+    best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
-
-
-
+    target_pop = []
+
+    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    for i in range(gen):
+        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
         progress.set_postfix(postfix_dict)

         progress.n = 0
@@ -341,80 +333,65 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         progress.update(0)

         for j in range(len(activation_potentiation)):
-            for k in range(len(best_activations)):
-                activations.append(best_activations[k])
-
-            activations.append(activation_potentiation[j])

             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+
+            if i == 0:
+                act_pop.append(activation_potentiation[j])
+                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop.append(W)

-
-            model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)
-
+            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
             acc = model[get_acc()]
+
+            if strategy == 'accuracy': target_pop.append(acc)

-
-            if loss == 'categorical_crossentropy':
-                test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-            else:
-                test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-            if i == 0 and j == 0 and start_this is None:
-                best_loss = test_loss
-
-            if strategy == 'f1' or strategy == 'precision' or strategy == 'recall' or strategy == 'all':
+            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
                 precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])

-                if strategy == 'precision'
+                if strategy == 'precision':
+                    target_pop.append(precision_score)
+
                 if i == 0 and j == 0:
                     best_precision = precision_score

-                if strategy == 'recall'
+                if strategy == 'recall':
+                    target_pop.append(recall_score)
+
                 if i == 0 and j == 0:
                     best_recall = recall_score

-                if strategy == 'f1'
+                if strategy == 'f1':
+                    target_pop.append(f1_score)
+
                 if i == 0 and j == 0:
                     best_f1 = f1_score

-            if early_shifting != False:
-                if acc <= best_acc:
-                    early_shifting -= 1
-                    if early_shifting == 0:
-                        early_shifting = shift_patience
-                        break
-
             if ((strategy == 'accuracy' and acc >= best_acc) or
-                (strategy == 'loss' and test_loss <= best_loss) or
                 (strategy == 'f1' and f1_score >= best_f1) or
-                (strategy == 'precision' and precision_score >= best_precision) or
-                (strategy == 'recall' and recall_score >= best_recall)
-                (strategy == 'all' and f1_score >= best_f1 and acc >= best_acc and
-                test_loss <= best_loss and precision_score >= best_precision and
-                recall_score >= best_recall)):
+                (strategy == 'precision' and precision_score >= best_precision) or
+                (strategy == 'recall' and recall_score >= best_recall)):

-                current_best_activation = activation_potentiation[j]
                 best_acc = acc
-
+                best_weights = weight_pop[j]
+                final_activations = act_pop[j]
+                best_model = model
+
+                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
                     postfix_dict[f"{data} Batch Accuracy"] = acc
                 progress.set_postfix(postfix_dict)
-
-                final_activations = activations

                 if show_current_activations:
                     print(f", Current Activations={final_activations}", end='')

-
-
-
-
-                if loss == 'categorical_crossentropy':
-                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-                else:
-                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                if loss == 'categorical_crossentropy':
+                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                else:
+                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])

                 if batch_size == 1:
                     postfix_dict[f"{data} Loss"] = test_loss
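
Each strategy branch above appends its score to `target_pop`, which later becomes the fitness vector handed to the optimizer. The diff only shows that `metrics(y_true, y_pred)` returns `(precision, recall, f1)`; this standalone sketch assumes macro averaging over class-index arrays and is not the library's implementation:

```python
import numpy as np

def metrics_sketch(y_true, y_pred):
    """Macro-averaged precision/recall/F1 over class-index arrays (assumption)."""
    precisions, recalls = [], []
    for c in np.unique(y_true):
        tp = np.sum((y_pred == c) & (y_true == c))
        fp = np.sum((y_pred == c) & (y_true != c))
        fn = np.sum((y_pred != c) & (y_true == c))
        precisions.append(tp / (tp + fp) if tp + fp else 0.0)
        recalls.append(tp / (tp + fn) if tp + fn else 0.0)
    precision, recall = float(np.mean(precisions)), float(np.mean(recalls))
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1
```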
@@ -426,9 +403,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat

             # Update visualizations during training
             if show_history:
-
-                update_history_plots_for_learner(viz_objects,
-
+                gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                                                 best_acc_per_gen_list + [best_acc], x_train, final_activations)

             if neurons_history:
                 viz_objects['neurons']['artists'] = (
@@ -496,25 +473,16 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
                 return best_weights, best_model[get_preds()], best_acc, final_activations

             progress.update(1)
-            activations = []

-
-        best_acc_per_depth_list.append(best_acc)
+        best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)

-
-
-            check = best_acc_per_depth_list[-patience:]
-            if all(x == check[0] for x in check):
-                strategy = 'loss'
-                adaptive = False
-        elif adaptive == True and strategy == 'loss' and best_loss <= patience:
-            strategy = 'accuracy'
-            adaptive = False
+        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []

         # Early stopping check
         if early_stop == True and i > 0:
-            if
+            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                        activation_potentiation=final_activations, dtype=dtype)
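
The `optimizer(...)` call above fixes the contract any callable passed to `learner` must satisfy: it receives the weight population, the activation population, the generation index, and the fitness vector, plus `target_fitness` and `bar_status` keywords, and must return the evolved `(weight_pop, act_pop)`. A shape-compatible no-op stand-in for testing; the actual selection/crossover/mutation logic of `planeat.evolve` is deliberately not reproduced here:

```python
import numpy as np

def identity_optimizer(weight_pop, act_pop, gen_index, fitness,
                       target_fitness='max', bar_status=False):
    """No-op optimizer matching the call signature used by learner()."""
    # planeat.evolve would select, cross over, and mutate here, steered by
    # `fitness` and `target_fitness` ('max' to learn, 'min' to unlearn).
    return weight_pop, list(act_pop)
```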
@@ -548,7 +516,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
                 else:
                     train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

-                print('\nActivations: ',
+                print('\nActivations: ', act_pop[-1])
                 print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                 print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
                 if data == 'Test':