pyerualjetwork 5.52-py3-none-any.whl → 5.54-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +7 -4
- pyerualjetwork/nn.py +227 -136
- {pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/METADATA +1 -1
- {pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/RECORD +6 -6
- {pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
```diff
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """
 
-__version__ = "5.52"
+__version__ = "5.54"
 
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
@@ -54,8 +54,11 @@ def print_version(version):
 def print_update_notes(update):
     print(f"Notes:\n{update}")
 
-print_version(__version__)
-print_update_notes(__update__)
+#print_version(__version__)
+#print_update_notes(__update__)
+
+import warnings
+warnings.filterwarnings('ignore')
 
 required_modules = ["scipy", "tqdm", "pandas", "networkx", "seaborn", "numpy", "matplotlib", "colorama", "cupy", "psutil"]
 
@@ -71,7 +74,7 @@ if missing_modules:
         f"Missing modules detected: {', '.join(missing_modules)}\n"
         "Please run the following command to install the missing packages:\n\n"
         f"    pip install {' '.join(missing_modules)}\n\n"
-        "NOTE: needed numpy version --> numpy==1.26.4 and if you have cuda toolkit version 12 you need to install with pip --> pip install cupy-cuda12x. If you have not cuda gpu or toolkit and you want a CPU training doesnt matter. You need to install random cupy version. This is NECESSARY module."
+        "NOTE: needed numpy version --> numpy==1.26.4 and if you have cuda toolkit version 12 you need to install with pip --> pip install cupy-cuda12x. If you have not cuda gpu or toolkit and you want to use a CPU for training doesnt matter. You need to install random cupy version. This is NECESSARY module."
         "For more information, visit the PyerualJetwork GitHub README.md file:\n"
         "https://github.com/HCB06/PyerualJetwork/blob/main/README.md"
 
```
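For context, the dependency check that produces this message boils down to probing each required module for importability. A minimal sketch of the pattern, assuming `importlib.util.find_spec` (the package's actual detection code is not shown in this diff):

```python
# Sketch of a dependency check matching the required_modules /
# missing_modules names above; illustrative, not the package's exact code.
import importlib.util

required_modules = ["scipy", "tqdm", "pandas", "networkx", "seaborn",
                    "numpy", "matplotlib", "colorama", "cupy", "psutil"]

# find_spec returns None when a module cannot be located, so this builds
# the list of packages the warning message asks the user to pip install.
missing_modules = [m for m in required_modules
                   if importlib.util.find_spec(m) is None]

if missing_modules:
    print(f"Missing modules detected: {', '.join(missing_modules)}")
    print(f"    pip install {' '.join(missing_modules)}")
```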
pyerualjetwork/nn.py
CHANGED
```diff
@@ -51,6 +51,8 @@ import numpy as np
 import cupy as cp
 import copy
 import random
+import multiprocessing
+from multiprocessing import Pool, cpu_count
 
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
```
```diff
@@ -125,12 +127,13 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
           weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
           neurons_history=False, early_stop=False, show_history=False, target_loss=None,
           interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
-          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[],
+          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], parallel_training=False,
+          thread_count=cpu_count(), decision_boundary_history=False, cuda=False, dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation(or activation function) & weight values for the labeled classificaiton dataset.
 
-    Why genetic optimization
+    Why genetic optimization FBN(Fitness Biased NeuroEvolution) and not backpropagation?
     Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
     Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: ENE.
```
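The renamed docstring line makes the design explicit: FBN ranks candidate networks by a fitness score instead of following gradients, so non-differentiable activation functions are no obstacle. A toy sketch of that idea with a hypothetical weighted accuracy/loss score (the real formula lives in `fitness_functions.py` and may differ):

```python
# Hypothetical fitness score in the spirit of the wals(acc, train_loss,
# acc_impact, loss_impact) calls visible later in this diff; the exact
# formula used by pyerualjetwork/fitness_functions.py is not shown here.
def fitness_score(acc, loss, acc_impact=0.9, loss_impact=0.1):
    # Higher accuracy raises fitness, higher loss lowers it.
    return acc * acc_impact - loss * loss_impact

# Selection needs only comparisons, never derivatives, which is why
# non-differentiable activation functions are not an obstacle.
candidates = [(0.91, 0.30), (0.88, 0.12), (0.93, 0.45)]   # (acc, loss) pairs
best_acc, best_loss = max(candidates, key=lambda c: fitness_score(*c))
```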
```diff
@@ -191,7 +194,11 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
     :param decision_boundary_history: (bool, optional): At the end of the training, the decision boundary history is shown in animation form. Note: If you are sure of your memory, set it to True. Default: False
     :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP or PTNN. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
     :param dtype: (numpy.dtype): Data type for the Weight matrices. np.float32 by default. Example: np.float64 or np.float16.
+    :param parallel_training: (bool, optional): Parallel training process ? Default = False.
+        - Recommended for large populations (pop_size) to significantly speed up training.
+    :param thread_count: (int, optional): If parallel_training is True you can give max thread number for parallel training. Default: (automaticly selects maximum thread count of your system).
     :param cuda: (bool, optional): CUDA GPU acceleration ? Default = False.
+        - Recommended for large neural net architectures or big datasets to significantly speed up training.
 
     Returns:
     tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
```
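A hedged usage sketch for the new switches; the synthetic data and the `ene.evolver` wiring follow the pattern in the project README and are assumptions, not part of this diff:

```python
import numpy as np
from multiprocessing import cpu_count
from pyerualjetwork import nn, ene   # module names taken from this wheel's RECORD

x_train = np.random.rand(256, 16).astype(np.float32)                  # toy features
y_train = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 256)]   # one-hot labels

# Optimizer wiring is an assumption based on the README's ENE examples.
optimizer = lambda *args, **kwargs: ene.evolver(*args, **kwargs)

model = nn.learn(x_train, y_train, optimizer,
                 gen=10,
                 pop_size=100,              # parallelism pays off as this grows
                 parallel_training=True,    # new in 5.54
                 thread_count=cpu_count())  # or cap it explicitly, e.g. 4
```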
```diff
@@ -359,156 +366,240 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
         progress.last_print_n = 0
         progress.update(0)
 
-
-
-            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+        if parallel_training:
 
-
-
-                    pass
-                else:
-
-                    act_pop[j] = activations[j]
-                    W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
-                    weight_pop[j] = W
+            eval_params = []
+            fit_params = []
 
+            with Pool(processes=thread_count) as pool:
 
-
-                weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
+                batches = pool.starmap(batcher, [(x_train, y_train)] * pop_size)
 
+                eval_params = [
+                    (
+                        x_b, y_b, None, model_type, weight_pop[j], act_pop[j],
+                        activation_potentiations[j], auto_normalization, None, cuda
+                    )
+                    for j, (x_b, y_b) in enumerate(batches)
+                ]
 
-
-
-
-
-
-                                                          y_pred_batch=model[get_preds_softmax()])
-                else:
-                    train_loss = binary_crossentropy(y_true_batch=y_train_batch,
-                                                     y_pred_batch=model[get_preds_softmax()])
+                if (model_type == 'PLAN' and weight_evolve is False) or (model_type == 'PLAN' and fit_start is True and i == 0):
+                    fit_params = [
+                        (x_b, y_b, act_pop[j], weight_pop[j], auto_normalization, cuda, dtype)
+                        for j, (x_b, y_b) in enumerate(batches)
+                    ]
 
-
-
-
-                if fitness >= best_fitness:
-
-                    best_fitness = fitness
-                    best_acc = acc
-                    best_loss = train_loss
-                    best_weight = np.copy(weight_pop[j]) if model_type == 'PLAN' else copy.deepcopy(weight_pop[j])
-                    best_model = model
-
-                    if isinstance(act_pop[j], list) and model_type == 'PLAN':
-                        final_activations = act_pop[j].copy()
-                    elif isinstance(act_pop[j], str):
-                        final_activations = act_pop[j]
-                    else:
-                        final_activations = copy.deepcopy(act_pop[j])
-
-                    if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
-
-                    if batch_size == 1:
-                        postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
-                        postfix_dict[f"{data} Loss"] = format_number(train_loss)
-                        progress.set_postfix(postfix_dict)
-
-                    if show_current_activations:
-                        print(f", Current Activations={final_activations}", end='')
-
-                # Update visualizations during training
-                if show_history:
-                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
-                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
-
-                if neurons_history:
-                    viz_objects['neurons']['artists'] = (
-                        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
-                                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
-                                                          y_train[0], viz_objects['neurons']['artists'],
-                                                          data=data, fig1=viz_objects['neurons']['fig'],
-                                                          acc=best_acc, loss=train_loss)
-                    )
+                if start_this_act is not None and i == 0:
+                    w_copy = copy.deepcopy(weight_pop[0])
 
-
-
-                                                             G=viz_objects['web']['G'], return_objs=True)
-                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
-                    viz_objects['web']['artists'].append(art5_list)
-
-                # Check target accuracy
-                if target_acc is not None and best_acc >= target_acc:
-                    progress.close()
-                    train_model = evaluate(x_train, y_train, W=best_weight,
-                                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
-                    if loss == 'categorical_crossentropy':
-                        train_loss = categorical_crossentropy(y_true_batch=y_train,
-                                                              y_pred_batch=train_model[get_preds_softmax()])
-                    else:
-                        train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                         y_pred_batch=train_model[get_preds_softmax()])
-
-                    print('\nActivations: ', final_activations)
-                    print('Activation Potentiation: ', activation_potentiation)
-                    print('Train Accuracy:', train_model[get_acc()])
-                    print('Train Loss: ', train_loss, '\n')
-                    print('Model Type:', model_type)
-
-                    if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+                W = pool.starmap(plan_fit, fit_params)
+                new_weights = W
 
-
-
-
-
-                                                    predictions=best_model[get_preds()],
-                                                    accuracy=best_acc,
-                                                    activations=final_activations,
-                                                    softmax_predictions=best_model[get_preds_softmax()],
-                                                    model_type=model_type,
-                                                    activation_potentiation=activation_potentiation)
+                eval_params = [
+                    (*ep[:4], w, *ep[5:])
+                    for ep, w in zip(eval_params, new_weights)
+                ]
 
-
+                if start_this_act is not None and i == 0:
+                    eval_params[0] = (
+                        eval_params[0][0], eval_params[0][1], eval_params[0][2], eval_params[0][3],
+                        w_copy, eval_params[0][5], eval_params[0][6], eval_params[0][7],
+                        eval_params[0][8], eval_params[0][9]
+                    )
+
+                models = pool.starmap(evaluate, eval_params)
+
+                y_preds = [model[get_preds_softmax()] for model in models]
+                loss_params = [(eval_params[f][1], y_preds[f]) for f in range(pop_size)]
+                loss_func = categorical_crossentropy if loss == 'categorical_crossentropy' else binary_crossentropy
+                losses = pool.starmap(loss_func, loss_params)
+                fitness_params = [(models[f][get_acc()], losses[f], acc_impact, loss_impact) for f in range(pop_size)]
+                target_pop = pool.starmap(wals, fitness_params)
+
+                if cuda:
+                    target_pop = [fit.get() for fit in target_pop]
 
-
-
-
-
-
-
-
-
-
-
-
+            best_idx = np.argmax(target_pop)
+            best_fitness = target_pop[best_idx]
+            best_weight = models[best_idx][0]
+            best_loss = losses[best_idx]
+            best_acc = models[best_idx][get_acc()]
+
+            x_train_batch = eval_params[best_idx][0]
+            y_train_batch = eval_params[best_idx][1]
+
+            if isinstance(eval_params[best_idx][5], list) and model_type == 'PLAN':
+                final_activations = eval_params[best_idx][5].copy()
+            elif isinstance(eval_params[best_idx][5], str):
+                final_activations = eval_params[best_idx][5]
+            else:
+                final_activations = copy.deepcopy(eval_params[best_idx][5])
+
+            if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
-
-
-
-
-                    print('Model Type:', model_type)
+            if batch_size == 1:
+                postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
+                postfix_dict[f"{data} Loss"] = format_number(best_loss)
+                progress.set_postfix(postfix_dict)
 
-
+            if show_current_activations:
+                print(f", Current Activations={final_activations}", end='')
 
-
-            display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
-                                               train_loss, y_train, interval)
-
-            model = template_model._replace(weights=best_weight,
-                                            predictions=best_model[get_preds()],
-                                            accuracy=best_acc,
-                                            activations=final_activations,
-                                            softmax_predictions=best_model[get_preds_softmax()],
-                                            model_type=model_type,
-                                            activation_potentiation=activation_potentiation)
-
-            return model
+            progress.update(1)
 
+        else:
+
+            for j in range(pop_size):
+
+                x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+
+                if fit_start is True and i == 0:
+                    if start_this_act is not None and j == 0:
+                        pass
+                    else:
 
-
+                        act_pop[j] = activations[j]
+                        W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
+                        weight_pop[j] = W
+
+
+                if weight_evolve is False:
+                    weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
+
+
+                model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
+                acc = model[get_acc()]
+
+                if loss == 'categorical_crossentropy':
+                    train_loss = categorical_crossentropy(y_true_batch=y_train_batch,
+                                                          y_pred_batch=model[get_preds_softmax()])
+                else:
+                    train_loss = binary_crossentropy(y_true_batch=y_train_batch,
+                                                     y_pred_batch=model[get_preds_softmax()])
+
+                fitness = wals(acc, train_loss, acc_impact, loss_impact)
+                target_pop.append(fitness if not cuda else fitness.get())
+
+                if fitness >= best_fitness:
+
+                    best_fitness = fitness
+                    best_acc = acc
+                    best_loss = train_loss
+                    best_weight = np.copy(weight_pop[j]) if model_type == 'PLAN' else copy.deepcopy(weight_pop[j])
+                    best_model = model
+
+                    if isinstance(act_pop[j], list) and model_type == 'PLAN':
+                        final_activations = act_pop[j].copy()
+                    elif isinstance(act_pop[j], str):
+                        final_activations = act_pop[j]
+                    else:
+                        final_activations = copy.deepcopy(act_pop[j])
+
+                    if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+
+                    if batch_size == 1:
+                        postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
+                        postfix_dict[f"{data} Loss"] = format_number(train_loss)
+                        progress.set_postfix(postfix_dict)
+
+                    if show_current_activations:
+                        print(f", Current Activations={final_activations}", end='')
+
+                # Check target accuracy
+                if target_acc is not None and best_acc >= target_acc:
+                    progress.close()
+                    train_model = evaluate(x_train, y_train, W=best_weight,
+                                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
+                    if loss == 'categorical_crossentropy':
+                        train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                              y_pred_batch=train_model[get_preds_softmax()])
+                    else:
+                        train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                         y_pred_batch=train_model[get_preds_softmax()])
+
+                    print('\nActivations: ', final_activations)
+                    print('Activation Potentiation: ', activation_potentiation)
+                    print('Train Accuracy:', train_model[get_acc()])
+                    print('Train Loss: ', train_loss, '\n')
+                    print('Model Type:', model_type)
+
+                    if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
+                    display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                       best_loss, y_train, interval)
+
+                    model = template_model._replace(weights=best_weight,
+                                                    predictions=best_model[get_preds()],
+                                                    accuracy=best_acc,
+                                                    activations=final_activations,
+                                                    softmax_predictions=best_model[get_preds_softmax()],
+                                                    model_type=model_type,
+                                                    activation_potentiation=activation_potentiation)
+
+                    return model
+
+                # Check target loss
+                if target_loss is not None and best_loss <= target_loss:
+                    progress.close()
+                    train_model = evaluate(x_train, y_train, W=best_weight,
+                                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
+
+                    if loss == 'categorical_crossentropy':
+                        train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                              y_pred_batch=train_model[get_preds_softmax()])
+                    else:
+                        train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                         y_pred_batch=train_model[get_preds_softmax()])
+
+                    print('\nActivations: ', final_activations)
+                    print('Activation Potentiation: ', activation_potentiation)
+                    print('Train Accuracy:', train_model[get_acc()])
+                    print('Train Loss: ', train_loss, '\n')
+                    print('Model Type:', model_type)
+
+                    if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
+                    # Display final visualizations
+                    display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                       train_loss, y_train, interval)
+
+                    model = template_model._replace(weights=best_weight,
+                                                    predictions=best_model[get_preds()],
+                                                    accuracy=best_acc,
+                                                    activations=final_activations,
+                                                    softmax_predictions=best_model[get_preds_softmax()],
+                                                    model_type=model_type,
+                                                    activation_potentiation=activation_potentiation)
+
+                    return model
+
+
+                progress.update(1)
+
+                # Update visualizations during training
+                if show_history:
+                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
+
+                if neurons_history:
+                    viz_objects['neurons']['artists'] = (
+                        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
+                                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
+                                                          y_train[0], viz_objects['neurons']['artists'],
+                                                          data=data, fig1=viz_objects['neurons']['fig'],
+                                                          acc=best_acc, loss=train_loss)
+                    )
+
+                if neural_web_history:
+                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
+                                                             G=viz_objects['web']['G'], return_objs=True)
+                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+                    viz_objects['web']['artists'].append(art5_list)
+
 
         if decision_boundary_history:
-            if i == 0:
+            if i == 0:
                 fig, ax = create_decision_boundary_hist()
                 artist = []
             artist = plot_decision_boundary(x_train_batch, y_train_batch, activations=act_pop[np.argmax(target_pop)], W=weight_pop[np.argmax(target_pop)], cuda=cuda, model_type=model_type, ax=ax, artist=artist)
```
```diff
@@ -517,7 +608,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
     train_model = evaluate(x_train, y_train, W=best_weight, activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
 
     if loss == 'categorical_crossentropy':
-        train_loss = categorical_crossentropy(y_true_batch=y_train,
+        train_loss = categorical_crossentropy(y_true_batch=y_train,
                                               y_pred_batch=train_model[get_preds_softmax()])
     else:
         train_loss = binary_crossentropy(y_true_batch=y_train,
```
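Stepping back, the new `parallel_training` branch in nn.py is a classic fan-out/fan-in: build one argument tuple per population member, hand the list to `Pool.starmap`, then select the fittest result. A self-contained sketch of that pattern with illustrative stand-ins (`eval_candidate` and `fitness` are not PyerualJetwork APIs):

```python
from multiprocessing import Pool, cpu_count

def eval_candidate(weights, target):
    """Stand-in for evaluate(): fake accuracy/loss for one candidate."""
    err = abs(sum(weights) - target)
    return 1.0 / (1.0 + err), err          # (accuracy-like, loss-like)

def fitness(acc, loss, acc_impact=0.9, loss_impact=0.1):
    return acc * acc_impact - loss * loss_impact

if __name__ == '__main__':
    population = [[0.1 * j, 0.2] for j in range(8)]   # 8 candidate "models"
    params = [(w, 0.5) for w in population]           # one tuple per member
    with Pool(processes=cpu_count()) as pool:
        results = pool.starmap(eval_candidate, params)  # fan out
    scores = [fitness(a, l) for a, l in results]        # fan in
    best_idx = max(range(len(scores)), key=scores.__getitem__)
    print('best candidate:', population[best_idx])
```

Note the `__main__` guard: with the default spawn start method on Windows and macOS, worker processes re-import the module, so code that creates the Pool must not run at import time.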
{pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.52
+Version: 5.54
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
```
{pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/RECORD
CHANGED
```diff
@@ -1,11 +1,11 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=ZiTcxYl47_8ru1CJcJTjK9izr4zpY-u4bSMVnuEZTwY,3087
 pyerualjetwork/ene.py,sha256=luTvspHRTose6s3uRas40pNXyKoxU9siaHiMBNI5yoc,42136
 pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
 pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
 pyerualjetwork/issue_solver.py,sha256=uay_9XK6xWnLmK2P_BeyDQlyNXzg_zYffnXYd228wZk,4102
 pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
 pyerualjetwork/model_ops.py,sha256=WaP1XwKqXMfZl4Yop8a1Bg0xtmLYgap9JFOWHaLr7S4,25143
-pyerualjetwork/nn.py,sha256=
+pyerualjetwork/nn.py,sha256=WSolklnr3Rx4WadCDqo59IPFOpjdmZVjF9zHlgv-SXg,41405
 pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
 pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
@@ -21,7 +21,7 @@ pyerualjetwork/cuda/data_ops.py,sha256=BEXh4M7BWXaTpYlVS9D2i3CGgOmL5131vy7FZyuTQ
 pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
 pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
 pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
-pyerualjetwork-5.52.dist-info/METADATA,sha256=
-pyerualjetwork-5.52.dist-info/WHEEL,sha256=
-pyerualjetwork-5.52.dist-info/top_level.txt,sha256=
-pyerualjetwork-5.52.dist-info/RECORD,,
+pyerualjetwork-5.54.dist-info/METADATA,sha256=HI6CKiT5d1urRW8Y_hIt2G4EajdUTq_6AaTaB1B4Z3A,7988
+pyerualjetwork-5.54.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.54.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.54.dist-info/RECORD,,
```
{pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.52.dist-info → pyerualjetwork-5.54.dist-info}/top_level.txt
File without changes