pyerualjetwork 4.2.1__py3-none-any.whl → 4.2.2b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +2 -2
- pyerualjetwork/memory_operations.py +1 -1
- pyerualjetwork/model_operations.py +2 -2
- pyerualjetwork/plan.py +43 -95
- pyerualjetwork/plan_cuda.py +48 -100
- pyerualjetwork/planeat.py +27 -26
- pyerualjetwork/planeat_cuda.py +21 -21
- {pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/RECORD +11 -11
- {pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -1,11 +1,11 @@
-__version__ = "4.2.1"
+__version__ = "4.2.2b0"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

 def print_version(__version__):
     print(f"PyerualJetwork Version {__version__}" + '\n')

 def print_update_notes(__update__):
-    print(f"
+    print(f"Notes:\n{__update__}")

 print_version(__version__)
 print_update_notes(__update__)
pyerualjetwork/memory_operations.py
CHANGED
@@ -40,7 +40,7 @@ def transfer_to_cpu(x, dtype=np.float32):

     return: NumPy array with the specified dtype
     """
-    from
+    from ui import loading_bars, initialize_loading_bar
     try:
         if isinstance(x, np.ndarray):
             return x.astype(dtype) if x.dtype != dtype else x
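The only change in this file swaps a truncated import for an absolute one resolved inside the function body. Importing a helper at call time like this is a common way to dodge circular imports and defer the dependency cost; a minimal sketch of the pattern, with `tqdm` standing in for the package's own `ui` helpers (an assumption, not the library's actual internals):

```python
import numpy as np

def transfer_to_cpu_sketch(x, dtype=np.float32):
    # Deferred import: resolved on first call rather than at module load,
    # so a `ui` module that itself imports this one cannot deadlock.
    from tqdm import tqdm  # stand-in for: from ui import initialize_loading_bar

    if isinstance(x, np.ndarray):
        return x.astype(dtype) if x.dtype != dtype else x
    # Non-ndarray inputs: copy element-wise behind a progress bar.
    return np.asarray([v for v in tqdm(x, desc="transfer")], dtype=dtype)

print(transfer_to_cpu_sketch([1.0, 2.0, 3.0]).dtype)  # float32
```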
pyerualjetwork/model_operations.py
CHANGED
@@ -334,8 +334,8 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
         ndarray: Output from the model.
     """

-    from
-    from
+    from data_operations import standard_scaler
+    from plan import feed_forward

     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)

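The two repaired imports spell out the RAM prediction path: inputs go through `standard_scaler`, then through `feed_forward`. A rough stand-alone sketch of that pipeline, with NumPy stand-ins for both helpers (the real functions carry more options, e.g. activation potentiation):

```python
import numpy as np

def standard_scaler_sketch(scaler_params, x):
    # Stand-in for data_operations.standard_scaler: z-score with stored params.
    if scaler_params is None:
        return x
    mean, std = scaler_params
    return (x - mean) / std

def predict_model_ram_sketch(Input, W, scaler_params=None, dtype=np.float32):
    # Scale, then a single weight pass standing in for plan.feed_forward.
    x = standard_scaler_sketch(scaler_params, np.asarray(Input, dtype=dtype))
    return W @ x  # assumption: W has shape (classes, features)

W = np.random.rand(3, 4).astype(np.float32)
print(predict_model_ram_sketch(np.ones(4, dtype=np.float32), W))
```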
pyerualjetwork/plan.py
CHANGED
@@ -16,7 +16,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """

 import numpy as np
-from colorama import Fore
 import math

 ### LIBRARY IMPORTS ###
@@ -174,11 +173,11 @@ def fit(
     return normalization(LTPW, dtype=dtype)


-def learner(x_train, y_train, optimizer,
-            neural_web_history=False, show_current_activations=False,
+def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_size=1,
+            neural_web_history=False, show_current_activations=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-            interval=33.33, target_acc=None, target_loss=None,
-
+            interval=33.33, target_acc=None, target_loss=None,
+            start_this_act=None, start_this_W=None, dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
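The new signature drops the `x_test`/`y_test` pair and the activation filters in favor of three search parameters (`strategy`, `gen`, `batch_size`) plus two warm-start slots. A minimal usage sketch built from the docstring example; the `genetic_optimizer` wrapper shape and the argument forwarding to `planeat.evolver` are assumptions that may need adjusting against the released evolver signature:

```python
import numpy as np
from pyerualjetwork import plan, planeat

def genetic_optimizer(*args, **kwargs):
    # learner() calls: optimizer(weight_pop, act_pop, gen_index, fitness, bar_status=False)
    # and expects the evolved (weights, activations) pair back.
    return planeat.evolver(*args, **kwargs)

# Synthetic one-hot data, purely for illustration.
x_train = np.random.rand(64, 8).astype(np.float32)
y_train = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 64)]

W, preds, acc, activations = plan.learner(x_train, y_train,
                                          optimizer=genetic_optimizer,
                                          strategy='accuracy',
                                          gen=15,
                                          batch_size=0.05)
```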
@@ -206,23 +205,18 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                            optimizer=genetic_optimizer,
                            strategy='accuracy',
                            show_history=True,
-
+                           gen=15,
+                           batch_size=0.05,
                            interval=16.67)
     ```

-    x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
-
-    y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
-
     strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.

     gen (int, optional): The generation count for genetic optimization.

-    batch_size (float, optional): Batch size is used in the prediction process to receive
-
-    auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+    batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)

-    early_stop (bool, optional): If True, implements early stopping during training.(If
+    early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.

     show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False

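The fractional `batch_size` described above is easy to picture: each evaluation draws a fresh random subset whose size is that fraction of the training set, so across many generations the whole set is covered. A minimal sketch, assuming the package's `batcher` reduces to random subsampling without replacement (its actual helper may differ):

```python
import numpy as np

def batcher_sketch(x, y, batch_size=1.0):
    # Plausible reading of `batcher` with a float batch_size: a random
    # subset holding batch_size * len(x) samples, drawn without replacement.
    n = max(1, int(len(x) * batch_size))
    idx = np.random.choice(len(x), size=n, replace=False)
    return x[idx], y[idx]

x = np.arange(100, dtype=np.float32).reshape(100, 1)
y = np.eye(2, dtype=np.float32)[np.arange(100) % 2]
xb, yb = batcher_sketch(x, y, batch_size=0.08)
print(xb.shape)  # (8, 1) -- each call sees a different 8% slice
```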
@@ -235,10 +229,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.

     target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-    except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-    only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())

     start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None

@@ -247,8 +237,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.

     neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-    target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'

     dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

@@ -257,37 +245,23 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc

     """

-
+    from .planeat import define_genomes
+
+    data = 'Train'

-
+    activation_potentiation_len = len(all_activations())

     # Pre-checks

     x_train = x_train.astype(dtype, copy=False)
     y_train = optimize_labels(y_train, cuda=False)

-    if x_test is None and y_test is None:
-        x_test = x_train
-        y_test = y_train
-        data = 'Train'
-    else:
-        x_test = x_test.astype(dtype, copy=False)
-        y_test = optimize_labels(y_test, cuda=False)
-        data = 'Test'
-
-    # Filter activation functions
-    if only_this is not None:
-        activation_potentiation = only_this
-    if except_this is not None:
-        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
     if gen is None:
-        gen =
+        gen = activation_potentiation_len

     if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+    if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')

-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
@@ -300,31 +274,23 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     # Initialize variables
     act_pop = []
     weight_pop = []
-
-    if start_this_act is None and start_this_W is None:
-        best_acc = 0
-    else:
-        act_pop.append(start_this_act)
-        weight_pop.append(start_this_W)
-
-        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
-
-        if loss == 'categorical_crossentropy':
-            test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-        else:
-            test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-        best_acc = model[get_acc()]
-        best_loss = test_loss

+    best_acc = 0
+    best_f1 = 0
+    best_recall = 0
+    best_precision = 0
     best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
     target_pop = []

-    progress = initialize_loading_bar(total=
+    progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len)
+
+    if start_this_act is not None and start_this_W is not None:
+        weight_pop[0] = start_this_W
+        act_pop[0] = start_this_act

     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
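Population setup now comes from PLANEAT's `define_genomes`, with slot 0 optionally overwritten to resume a previous run. A sketch of that warm-start seeding under assumed shapes (the real `define_genomes` returns richer genomes than this):

```python
import numpy as np

def define_genomes_sketch(input_shape, output_shape, population_size):
    # Stand-in for planeat.define_genomes: one random weight matrix and one
    # activation list per genome (the real genomes carry more structure).
    weights = [np.random.uniform(-1, 1, (output_shape, input_shape)).astype(np.float32)
               for _ in range(population_size)]
    activations = [['linear'] for _ in range(population_size)]
    return weights, activations

weight_pop, act_pop = define_genomes_sketch(input_shape=8, output_shape=2,
                                            population_size=10)

# Warm start, mirroring the diff: a previously trained genome takes slot 0
# and competes against the fresh random population from generation one.
start_this_W = np.zeros((2, 8), dtype=np.float32)  # e.g. weights from a canceled run
start_this_act = ['tanh']
weight_pop[0] = start_this_W
act_pop[0] = start_this_act
```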
@@ -334,22 +300,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         progress.last_print_n = 0
         progress.update(0)

-        for j in range(
-
-            x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            if i == 0:
-                act_pop.append(activation_potentiation[j])
-                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                weight_pop.append(W)
-
+        for j in range(activation_potentiation_len):

-
+            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
             acc = model[get_acc()]

             if strategy == 'accuracy': target_pop.append(acc)

             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                precision_score, recall_score, f1_score = metrics(
+                precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])

                 if strategy == 'precision':
                     target_pop.append(precision_score)
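Each genome `j` is now scored on a fresh random training batch, and the scalar pushed into `target_pop` depends on `strategy`. A self-contained sketch of that scoring rule for binary labels (the package computes these scores through its own `metrics` helper):

```python
import numpy as np

def fitness_for_strategy(y_true, y_pred, strategy='accuracy'):
    # One scalar per genome, chosen by `strategy` (binary labels for brevity).
    if strategy == 'accuracy':
        return float(np.mean(y_true == y_pred))
    tp = np.sum((y_true == 1) & (y_pred == 1))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    precision = tp / (tp + fp + 1e-12)
    recall = tp / (tp + fn + 1e-12)
    if strategy == 'precision':
        return float(precision)
    if strategy == 'recall':
        return float(recall)
    return float(2 * precision * recall / (precision + recall + 1e-12))  # 'f1'

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1])
print(fitness_for_strategy(y_true, y_pred, strategy='f1'))  # ~0.667
```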
@@ -391,22 +351,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 print(f", Current Activations={final_activations}", end='')

             if loss == 'categorical_crossentropy':
-
+                train_loss = categorical_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
             else:
-
+                train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])

             if batch_size == 1:
-                postfix_dict[f"{data} Loss"] =
-                best_loss =
+                postfix_dict[f"{data} Loss"] = train_loss
+                best_loss = train_loss
             else:
-                postfix_dict[f"{data} Batch Loss"] =
+                postfix_dict[f"{data} Batch Loss"] = train_loss
                 progress.set_postfix(postfix_dict)
-                best_loss =
+                best_loss = train_loss

             # Update visualizations during training
             if show_history:
                 gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [
+                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                  best_acc_per_gen_list + [best_acc], x_train, final_activations)

             if neurons_history:
@@ -415,7 +375,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                     viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                     y_train[0], viz_objects['neurons']['artists'],
                     data=data, fig1=viz_objects['neurons']['fig'],
-                    acc=best_acc, loss=
+                    acc=best_acc, loss=train_loss)
                 )

             if neural_web_history:
@@ -440,13 +400,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 print('\nActivations: ', final_activations)
                 print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                 print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                if data == 'Test':
-                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                                   train_loss, y_train, interval)
                 return best_weights, best_model[get_preds()], best_acc, final_activations

             # Check target loss
@@ -465,13 +422,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 print('\nActivations: ', final_activations)
                 print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                 print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                if data == 'Test':
-                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                                   train_loss, y_train, interval)
                 return best_weights, best_model[get_preds()], best_acc, final_activations

             progress.update(1)
@@ -479,7 +433,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)

-        weight_pop, act_pop = optimizer(
+        weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []

         # Early stopping check
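This call site pins down the optimizer contract: it receives the weight population, activation population, generation index, and a fitness array, and must return the evolved `(weights, activations)` pair. A deliberately tiny stand-in that honors the contract (not PLANEAT's evolver, just elitism plus Gaussian noise):

```python
import numpy as np

def toy_optimizer(weight_pop, act_pop, gen_index, fitness, bar_status=False):
    # Keep the fittest genome, refill the rest with noisy copies of it.
    order = np.argsort(fitness)                  # ascending: best is last
    best_W = weight_pop[order[-1]]
    best_act = act_pop[order[-1]]
    new_weights = [best_W + np.random.normal(0, 0.1, best_W.shape).astype(best_W.dtype)
                   for _ in range(len(weight_pop) - 1)] + [best_W]
    new_acts = [list(best_act) for _ in weight_pop]
    return new_weights, new_acts
```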
@@ -499,13 +453,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
             print('\nActivations: ', final_activations)
             print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
             print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-            if data == 'Test':
-                print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                               train_loss, y_train, interval)
             return best_weights, best_model[get_preds()], best_acc, final_activations

     # Final evaluation
@@ -518,15 +469,12 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     else:
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

-    print('\nActivations: ',
+    print('\nActivations: ', final_activations)
     print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
     print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-    if data == 'Test':
-        print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-        print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

     # Display final visualizations
-    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations

pyerualjetwork/plan_cuda.py
CHANGED
@@ -189,11 +189,11 @@ def fit(
     return normalization(LTPW, dtype=dtype)


-def learner(x_train, y_train, optimizer,
-            neural_web_history=False, show_current_activations=False,
+def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_size=1,
+            neural_web_history=False, show_current_activations=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-            interval=33.33, target_acc=None, target_loss=None,
-
+            interval=33.33, target_acc=None, target_loss=None,
+            start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation for the given dataset.
@@ -217,29 +217,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                            **kwargs)

     model = plan_cuda.learner(x_train,
-
-
-
-
-
-
+                              y_train,
+                              optimizer=genetic_optimizer,
+                              strategy='accuracy',
+                              show_history=True,
+                              gen=15,
+                              batch_size=0.05,
+                              interval=16.67)
     ```

-    x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
-
-    y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
-
     strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.

-    patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
-
     gen (int, optional): The generation count for genetic optimization.

-    batch_size (float, optional): Batch size is used in the prediction process to receive
+    batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)

-
-
-    early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
+    early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.

     show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False

@@ -252,10 +245,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.

     target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-    except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-    only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())

     start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None

@@ -274,20 +263,14 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc

     """

-
+    from .planeat_cuda import define_genomes

-
+    data = 'Train'
+
+    activation_potentiation_len = len(all_activations())

     y_train = optimize_labels(y_train, cuda=True)

-    if x_test is None and y_test is None:
-        x_test = x_train
-        y_test = y_train
-        data = 'Train'
-    else:
-        data = 'Test'
-        y_test = optimize_labels(y_test, cuda=True)
-
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=dtype)
         y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
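With `memory='gpu'` the training arrays are moved to the device up front. A rough sketch of what `transfer_to_gpu` plausibly reduces to with CuPy (requires a CUDA device; the package's helper also handles dtype and chunking details this sketch omits):

```python
import numpy as np
import cupy as cp  # requires a CUDA device

def transfer_to_gpu_sketch(x, dtype=cp.float32):
    # cp.asarray copies host memory to the device; it is a no-op for
    # arrays that already live on the GPU.
    return cp.asarray(x, dtype=dtype)

x_train = np.random.rand(16, 4).astype(np.float32)
x_train_gpu = transfer_to_gpu_sketch(x_train)
print(type(x_train_gpu))  # <class 'cupy.ndarray'>
```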
@@ -310,17 +293,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

     if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
-
-    # Filter activation functions
-    if only_this is not None:
-        activation_potentiation = only_this
-    if except_this is not None:
-        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-    if gen is None:
-        gen = len(activation_potentiation)
-
-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+    if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')

     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
@@ -335,29 +308,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     act_pop = []
     weight_pop = []

-
-
-
-
-        weight_pop.append(start_this_W)
-
-        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
-
-        if loss == 'categorical_crossentropy':
-            test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-        else:
-            test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-
-        best_acc = model[get_acc()]
-        best_loss = test_loss
-
+    best_acc = 0
+    best_f1 = 0
+    best_recall = 0
+    best_precision = 0
     best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
     target_pop = []

-    progress = initialize_loading_bar(total=
+    progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len)
+
+    if start_this_act is not None and start_this_W is not None:
+        weight_pop[0] = start_this_W
+        act_pop[0] = start_this_act

     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -367,22 +333,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         progress.last_print_n = 0
         progress.update(0)

-        for j in range(
-
-            x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            if i == 0:
-                act_pop.append(activation_potentiation[j])
-                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                weight_pop.append(W)
-
-            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
+        for j in range(activation_potentiation_len):

+            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
             acc = model[get_acc()]
+
             if strategy == 'accuracy': target_pop.append(acc)

             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                precision_score, recall_score, f1_score = metrics(
+                precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])

                 if strategy == 'precision':
                     target_pop.append(precision_score)
@@ -424,22 +384,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 print(f", Current Activations={final_activations}", end='')

             if loss == 'categorical_crossentropy':
-
+                train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
             else:
-
+                train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])

             if batch_size == 1:
-                postfix_dict[f"{data} Loss"] =
-                best_loss =
+                postfix_dict[f"{data} Loss"] = train_loss
+                best_loss = train_loss
             else:
-                postfix_dict[f"{data} Batch Loss"] =
+                postfix_dict[f"{data} Batch Loss"] = train_loss
                 progress.set_postfix(postfix_dict)
-                best_loss =
+                best_loss = train_loss

             # Update visualizations during training
             if show_history:
                 gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [
+                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                  best_acc_per_gen_list + [best_acc], x_train, final_activations)

             if neurons_history:
@@ -448,7 +408,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                     viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                     y_train[0], viz_objects['neurons']['artists'],
                     data=data, fig1=viz_objects['neurons']['fig'],
-                    acc=best_acc, loss=
+                    acc=best_acc, loss=train_loss)
                 )

             if neural_web_history:
@@ -473,13 +433,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 print('\nActivations: ', final_activations)
                 print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                 print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-
-                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
-
+
                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                                   train_loss, y_train, interval)
                 return best_weights, best_model[get_preds()], best_acc, final_activations

             # Check target loss
@@ -498,13 +455,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 print('\nActivations: ', final_activations)
                 print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                 print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                if data == 'Test':
-                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                                   train_loss, y_train, interval)
                 return best_weights, best_model[get_preds()], best_acc, final_activations

             progress.update(1)
@@ -512,7 +466,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)

-        weight_pop, act_pop = optimizer(
+        weight_pop, act_pop = optimizer(weight_pop, act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []

         # Early stopping check
@@ -532,13 +486,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
             print('\nActivations: ', final_activations)
             print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
             print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-            if data == 'Test':
-                print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                               train_loss, y_train, interval)
             return best_weights, best_model[get_preds()], best_acc, final_activations

     # Final evaluation
@@ -554,12 +505,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     print('\nActivations: ', final_activations)
     print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
     print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-    if data == 'Test':
-        print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-        print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

     # Display final visualizations
-    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
     return best_weights, best_model[get_preds()], best_acc, final_activations

pyerualjetwork/planeat.py
CHANGED
@@ -17,10 +17,10 @@ import random
 import math

 ### LIBRARY IMPORTS ###
-from
-from
-from
-from
+from plan import feed_forward
+from data_operations import normalization
+from ui import loading_bars, initialize_loading_bar
+from activation_functions import apply_activation, all_activations

 def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
     """
@@ -247,8 +247,6 @@ def evolver(weights,

     else:
         raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")
-
-    if policy == 'explorer': fitness_bias = 0

     if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
         (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
@@ -328,7 +326,7 @@ def evolver(weights,
                                cross_over_mode=cross_over_mode,
                                activation_selection_add_prob=activation_selection_add_prob,
                                activation_selection_change_prob=activation_selection_change_prob,
-
+                               threshold=activation_selection_rate,
                                bad_genomes_selection_prob=bad_genomes_selection_prob,
                                first_parent_fitness=best_fitness,
                                fitness_bias=fitness_bias,
@@ -351,8 +349,7 @@ def evolver(weights,
                                activation_change_prob=activation_mutate_change_prob,
                                weight_mutate_prob=weight_mutate_prob,
                                threshold=weight_mutate_rate,
-                               genome_fitness=normalized_fitness[i]
-                               epsilon=epsilon
+                               genome_fitness=normalized_fitness[i]
                                )

         elif mutation_prob < bad_genomes_mutation_prob:
@@ -364,10 +361,9 @@ def evolver(weights,
                                activation_change_prob=activation_mutate_change_prob,
                                weight_mutate_prob=weight_mutate_prob,
                                threshold=weight_mutate_rate,
-                               genome_fitness=normalized_fitness[i]
-                               epsilon=epsilon
+                               genome_fitness=normalized_fitness[i]
                                )
-
+
         if bar_status: progress.update(1)

     weights = np.vstack((bad_weights, good_weights))
@@ -397,11 +393,13 @@ def evolver(weights,
         print(" FITNESS BIAS: ", str(fitness_bias))
         print(" ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')

+
     print("*** Performance ***")
     print(" MAX FITNESS: ", str(round(max(fitness), 2)))
     print(" MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
     print(" MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')

+    print(" BEST GENOME ACTIVATION LENGTH: ", str(len(activation_potentiations[-1])))
     print(" BEST GENOME INDEX: ", str(len(weights)-1))
     print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -487,7 +485,7 @@ def cross_over(first_parent_W,
                cross_over_mode,
                activation_selection_add_prob,
                activation_selection_change_prob,
-
+               threshold,
                bad_genomes_selection_prob,
                first_parent_fitness,
                second_parent_fitness,
@@ -516,7 +514,7 @@ def cross_over(first_parent_W,
     activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
     with one from the second parent.

-
+    threshold (float): Determines how quickly activation functions are added or replaced
     during the crossover process.

     bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.
@@ -549,7 +547,7 @@ def cross_over(first_parent_W,
         cross_over_mode='tpm',
         activation_selection_add_prob=0.8,
         activation_selection_change_prob=0.5,
-
+        threshold=2,
         bad_genomes_selection_prob=0.7,
         first_parent_fitness=0.9,
         second_parent_fitness=0.85,
@@ -624,8 +622,8 @@ def cross_over(first_parent_W,

     if potential_activation_selection_add > activation_selection_add_prob:

-
-        new_threshold =
+        threshold = threshold / succes
+        new_threshold = threshold

         while True:

@@ -635,7 +633,7 @@ def cross_over(first_parent_W,
             child_act.append(random_undominant_activation)

             if len(dominant_parent_act) > new_threshold:
-                new_threshold +=
+                new_threshold += threshold
                 pass

             else:
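The repaired lines fix the loop's stride: the threshold is divided by `succes` (spelling from the source) and the running `new_threshold` grows by that stride, so roughly `len(dominant_parent_act) * succes / threshold` activations are copied over from the less dominant parent. A compact sketch of the add branch:

```python
import random

def add_activations_sketch(dominant_parent_act, undominant_parent_act,
                           threshold, succes):
    # Stride = threshold / succes; one activation is copied from the less
    # dominant parent per stride until the stride sum passes the dominant
    # parent's activation count. (`succes` spelling follows the source.)
    child_act = list(dominant_parent_act)
    stride = threshold / succes
    new_threshold = stride
    while True:
        child_act.append(random.choice(undominant_parent_act))
        if len(dominant_parent_act) > new_threshold:
            new_threshold += stride
        else:
            break
    return child_act

print(add_activations_sketch(['tanh', 'relu', 'sigmoid'], ['spiral'],
                             threshold=2, succes=0.9))
```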
@@ -646,8 +644,8 @@ def cross_over(first_parent_W,

     if potential_activation_selection_change_prob > activation_selection_change_prob:

-
-        new_threshold =
+        threshold = threshold / succes
+        new_threshold = threshold

         while True:

@@ -658,7 +656,7 @@ def cross_over(first_parent_W,
             child_act[random_index_dominant] = random_undominant_activation

             if len(dominant_parent_act) > new_threshold:
-                new_threshold +=
+                new_threshold += threshold
                 pass

             else:
@@ -674,8 +672,8 @@ def mutation(weight,
              activation_change_prob,
              weight_mutate_prob,
              threshold,
-             genome_fitness
-
+             genome_fitness
+             ):
     """
     Performs mutation on the given weight matrix and activation functions.
     - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -699,8 +697,6 @@ def mutation(weight,
     threshold (float): If the value you enter here is equal to the result of input layer * output layer, only a single weight will be mutated during each mutation process. If the value you enter here is half of the result of input layer * output layer, two weights in the weight matrix will be mutated.

     genome_fitness (float): Fitness value of genome
-
-    epsilon (float): Small epsilon constant

     Returns:
     tuple: A tuple containing:
@@ -730,7 +726,8 @@ def mutation(weight,
     row_end = weight.shape[0]
     col_end = weight.shape[1]

-    threshold = threshold *
+    threshold = threshold * genome_fitness
+    performance_control = 0
     new_threshold = threshold

     while True:
@@ -742,10 +739,14 @@ def mutation(weight,

         if int(row_end * col_end) > new_threshold:
             new_threshold += threshold
+            performance_control += 1
             pass

         else:
             break
+
+        if performance_control >= int(row_end * col_end):
+            break

     activation_mutate_prob = 1 - activation_mutate_prob
     potential_activation_mutation = random.uniform(0, 1)
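The new `performance_control` counter bounds this loop. The stride is `threshold * genome_fitness`, so a fitness near zero used to make `new_threshold` grow negligibly and the loop spin; the counter now forces a break after at most `row_end * col_end` passes. A sketch of the guarded loop:

```python
import random
import numpy as np

def mutate_weights_sketch(weight, threshold, genome_fitness):
    # One random weight is perturbed per pass. Without the counter, a
    # genome_fitness near 0 makes the stride ~0 and the loop never ends;
    # performance_control (added in this release) caps it at rows * cols passes.
    rows, cols = weight.shape
    stride = threshold * genome_fitness
    new_threshold = stride
    performance_control = 0
    while True:
        r, c = random.randrange(rows), random.randrange(cols)
        weight[r, c] = random.uniform(-1, 1)
        if rows * cols > new_threshold:
            new_threshold += stride
            performance_control += 1
        else:
            break
        if performance_control >= rows * cols:
            break
    return weight

W = np.zeros((3, 4), dtype=np.float32)
mutate_weights_sketch(W, threshold=6, genome_fitness=0.5)  # ~4 weights mutated
```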
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -248,8 +248,6 @@ def evolver(weights,

     else:
         raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")
-
-    if policy =='explorer': fitness_bias = 0

     if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
         (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
|
@@ -328,7 +326,7 @@ def evolver(weights,
|
|
328
326
|
cross_over_mode=cross_over_mode,
|
329
327
|
activation_selection_add_prob=activation_selection_add_prob,
|
330
328
|
activation_selection_change_prob=activation_selection_change_prob,
|
331
|
-
|
329
|
+
threshold=activation_selection_rate,
|
332
330
|
bad_genomes_selection_prob=bad_genomes_selection_prob,
|
333
331
|
first_parent_fitness=best_fitness,
|
334
332
|
fitness_bias=fitness_bias,
|
@@ -351,8 +349,7 @@ def evolver(weights,
                                activation_change_prob=activation_mutate_change_prob,
                                weight_mutate_prob=weight_mutate_prob,
                                threshold=weight_mutate_rate,
-                               genome_fitness=normalized_fitness[i]
-                               epsilon=epsilon
+                               genome_fitness=normalized_fitness[i]
                                )

         elif mutation_prob < bad_genomes_mutation_prob:
@@ -364,8 +361,7 @@ def evolver(weights,
                                activation_change_prob=activation_mutate_change_prob,
                                weight_mutate_prob=weight_mutate_prob,
                                threshold=weight_mutate_rate,
-                               genome_fitness=normalized_fitness[i]
-                               epsilon=epsilon
+                               genome_fitness=normalized_fitness[i]
                                )

         if bar_status: progress.update(1)
@@ -401,6 +397,7 @@ def evolver(weights,
     print(" MEAN REWARD: ", str(cp.round(cp.mean(fitness), 2)))
     print(" MIN REWARD: ", str(cp.round(min(fitness), 2)) + '\n')

+    print(" BEST GENOME ACTIVATION LENGTH: ", str(len(activation_potentiations)))
     print(" BEST GENOME INDEX: ", str(len(weights)-1))
     print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -490,7 +487,7 @@ def cross_over(first_parent_W,
                cross_over_mode,
                activation_selection_add_prob,
                activation_selection_change_prob,
-
+               threshold,
                bad_genomes_selection_prob,
                first_parent_fitness,
                second_parent_fitness,
@@ -519,7 +516,7 @@ def cross_over(first_parent_W,
     activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
     with one from the second parent.

-
+    threshold (float): Determines how quickly activation functions are added or replaced
     during the crossover process.

     bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.
@@ -552,7 +549,7 @@ def cross_over(first_parent_W,
         cross_over_mode='tpm',
         activation_selection_add_prob=0.8,
         activation_selection_change_prob=0.5,
-
+        threshold=2,
         bad_genomes_selection_prob=0.7,
         first_parent_fitness=0.9,
         second_parent_fitness=0.85,
@@ -628,8 +625,8 @@ def cross_over(first_parent_W,

     if potential_activation_selection_add > activation_selection_add_prob:

-
-        new_threshold =
+        threshold = threshold / succes
+        new_threshold = threshold

        while True:

@@ -639,7 +636,7 @@ def cross_over(first_parent_W,
             child_act.append(random_undominant_activation)

             if len(dominant_parent_act) > new_threshold:
-                new_threshold +=
+                new_threshold += threshold
                 pass

             else:
@@ -650,8 +647,8 @@ def cross_over(first_parent_W,

     if potential_activation_selection_change_prob > activation_selection_change_prob:

-
-        new_threshold =
+        threshold = threshold / succes
+        new_threshold = threshold

        while True:

@@ -662,7 +659,7 @@ def cross_over(first_parent_W,
             child_act[random_index_dominant] = random_undominant_activation

             if len(dominant_parent_act) > new_threshold:
-                new_threshold +=
+                new_threshold += threshold
                 pass

             else:
@@ -679,8 +676,7 @@ def mutation(weight,
              activation_change_prob,
              weight_mutate_prob,
              threshold,
-             genome_fitness
-             epsilon):
+             genome_fitness):
     """
     Performs mutation on the given weight matrix and activation functions.
     - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -705,7 +701,6 @@ def mutation(weight,

     genome_fitness (float): Fitness value of genome

-    epsilon (float): Small epsilon constant

     Returns:
     tuple: A tuple containing:
@@ -735,9 +730,10 @@ def mutation(weight,
     row_end = weight.shape[0]
     col_end = weight.shape[1]

-    threshold = threshold *
+    threshold = threshold * genome_fitness
     new_threshold = threshold
-
+    performance_control = 0
+
     while True:

         selected_row = int(random.uniform(start, row_end))
@@ -747,10 +743,14 @@ def mutation(weight,

         if int(row_end * col_end) > new_threshold:
             new_threshold += threshold
+            performance_control += 1
             pass

         else:
             break
+
+        if performance_control >= int(row_end * col_end):
+            break

     activation_mutate_prob = 1 - activation_mutate_prob
     potential_activation_mutation = random.uniform(0, 1)
{pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.2.1
+Version: 4.2.2b0
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=zrYDqgxtudYD39hQyng05sCwRHDnFsW-I4nuTVisZ0E,641
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=pb5CqJ0Th6fCjTNMCtqQMiwH3KezTxAijacglsKUxmY,14730
@@ -6,19 +6,19 @@ pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvt
 pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
 pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
 pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/memory_operations.py,sha256=
+pyerualjetwork/memory_operations.py,sha256=apcbFH87nQQTjAG6U9Hp7THrT8qW-6VCTKCnlzNq61E,13467
 pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
-pyerualjetwork/model_operations.py,sha256=
+pyerualjetwork/model_operations.py,sha256=RKqnh7-MByFosxqme4q4jC1lOndX26O-OVXYV6ZxoEE,12965
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/plan.py,sha256=mACNJnHlPkeqsHDYw0BVDXp8PCOlwWsF-1KD4a2Bk7U,30431
+pyerualjetwork/plan_cuda.py,sha256=G-VbJVnli8b85R5aUTz356CJRpmEO_7RzO_LkY-TdsY,32043
+pyerualjetwork/planeat.py,sha256=yLPQXO754lBv48f1MXdGodP93KstryeRG3HuUaRwj6g,40753
+pyerualjetwork/planeat_cuda.py,sha256=9uopmM-gTZpSb0EOExrOZPT8FF5BqDdEfCX0zYQb9QU,40712
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
 pyerualjetwork/visualizations_cuda.py,sha256=F60vQ92AXlMgBka3InXnOtGoM25vQJAlBIU2AlYTwks,29200
-pyerualjetwork-4.2.1.dist-info/METADATA,sha256=
-pyerualjetwork-4.2.1.dist-info/WHEEL,sha256=
-pyerualjetwork-4.2.1.dist-info/top_level.txt,sha256=
-pyerualjetwork-4.2.1.dist-info/RECORD,,
+pyerualjetwork-4.2.2b0.dist-info/METADATA,sha256=vB8iEJ4bSHzCc1dK4p1C-1RLp0Ap8dDJbVNuaelwDkE,7914
+pyerualjetwork-4.2.2b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.2.2b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.2.2b0.dist-info/RECORD,,
{pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/WHEEL
File without changes
{pyerualjetwork-4.2.1.dist-info → pyerualjetwork-4.2.2b0.dist-info}/top_level.txt
File without changes