pyerualjetwork 5.54a3__py3-none-any.whl → 5.55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/nn.py +88 -56
- {pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/METADATA +3 -3
- {pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/RECORD +6 -6
- {pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """

-__version__ = "5.54a3"
+__version__ = "5.55"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/nn.py
CHANGED
@@ -23,7 +23,7 @@ Currently, 3 types of models can be trained:
 PTNN (Potentiation Transfer Neural Network) -- With non-bias
 * Training Time for Small Projects: fast
 * Training Time for Big Projects: fast
-* Explainability:
+* Explainability: medium
 * Learning Capacity: high

 Read learn function docstring for know how to use of these model architectures.
@@ -100,7 +100,7 @@ def plan_fit(

     from .cpu.data_ops import normalization
     if not cuda:
-        from cpu.activation_functions import apply_activation
+        from .cpu.activation_functions import apply_activation
         array_type = np

     else:
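The one-character fix above adds the missing leading dot, turning an absolute import (which only resolves if a top-level `cpu` package happens to be on `sys.path`) into a package-relative one. A minimal sketch of the difference, assuming the published layout `pyerualjetwork/cpu/activation_functions.py`:

    # Inside pyerualjetwork/nn.py (a module of the pyerualjetwork package):

    # Absolute form: Python searches sys.path for a top-level package named
    # "cpu", which does not exist once the wheel is installed.
    # from cpu.activation_functions import apply_activation   # ModuleNotFoundError

    # Relative form: the leading dot anchors the lookup to the current
    # package, resolving to pyerualjetwork.cpu.activation_functions.
    from .cpu.activation_functions import apply_activation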
@@ -127,12 +127,13 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
           weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
           neurons_history=False, early_stop=False, show_history=False, target_loss=None,
           interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
-          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], parallel_training=
+          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], parallel_training=False,
+          thread_count=cpu_count(), decision_boundary_history=False, cuda=False, dtype=np.float32):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation(or activation function) & weight values for the labeled classificaiton dataset.

-   Why genetic optimization
+   Why genetic optimization FBN(Fitness Biased NeuroEvolution) and not backpropagation?
    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: ENE.

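The docstring's argument is that non-differentiable activation choices rule out gradient descent, so a generation of fitness-biased neuroevolution reduces to: perturb candidates, score them, keep the fittest. A minimal, self-contained sketch of that loop (a generic illustration only; `score` and the candidate encoding are placeholders, not PyerualJetwork's ENE internals):

    import random

    ACTIVATIONS = ['linear', 'relu', 'tanh', 'sigmoid']  # hypothetical pool

    def score(candidate):
        # Placeholder fitness: in PyerualJetwork this would be a weighted
        # blend of accuracy and loss on the training batch.
        return -abs(len(set(candidate)) - 2) + random.random() * 0.1

    def evolve(pop_size=10, gens=5, n_layers=3):
        population = [[random.choice(ACTIVATIONS) for _ in range(n_layers)]
                      for _ in range(pop_size)]
        for _ in range(gens):
            ranked = sorted(population, key=score, reverse=True)
            elite = ranked[:pop_size // 2]              # selection: keep the fittest half
            children = [[random.choice(ACTIVATIONS) if random.random() < 0.2 else g
                         for g in random.choice(elite)]  # mutation: no gradients needed
                        for _ in range(pop_size - len(elite))]
            population = elite + children
        return max(population, key=score)

    print(evolve())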
@@ -193,8 +194,11 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
    :param decision_boundary_history: (bool, optional): At the end of the training, the decision boundary history is shown in animation form. Note: If you are sure of your memory, set it to True. Default: False
    :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP or PTNN. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
    :param dtype: (numpy.dtype): Data type for the Weight matrices. np.float32 by default. Example: np.float64 or np.float16.
-   :param parallel_training: (bool, optional): Parallel training process ?
+   :param parallel_training: (bool, optional): Parallel training process ? Default = False.
+       - Recommended for large populations (pop_size) to significantly speed up training.
+   :param thread_count: (int, optional): If parallel_training is True you can give max thread number for parallel training. Default: (automaticly selects maximum thread count of your system).
    :param cuda: (bool, optional): CUDA GPU acceleration ? Default = False.
+       - Recommended for large neural net architectures or big datasets to significantly speed up training.

    Returns:
        tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
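A hedged usage sketch of the new parameters: enabling the parallel path should look roughly like the call below. The data and the `my_optimizer` wrapper are stand-ins, not library fixtures; only `parallel_training`, `thread_count`, `gen`, and `pop_size` are taken from the signature above.

    import numpy as np
    from pyerualjetwork import nn

    # Hypothetical toy data: 256 samples, 16 features, 4 one-hot classes.
    x_train = np.random.rand(256, 16).astype(np.float32)
    y_train = np.eye(4)[np.random.randint(0, 4, 256)].astype(np.float32)

    def my_optimizer(*args, **kwargs):
        ...  # in practice this wraps PyerualJetwork's ENE evolver

    model = nn.learn(
        x_train, y_train, my_optimizer,
        gen=10, pop_size=50,
        parallel_training=True,   # new in 5.55: fan evaluation out across processes
        thread_count=8,           # new in 5.55: cap worker count (default: cpu_count())
    )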
@@ -364,30 +368,56 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size

        if parallel_training:

-
-
-               for j in range(pop_size)
-           ]
+           eval_params = []
+           fit_params = []

-           with Pool(processes=
-               models = pool.starmap(evaluate, params)
+           with Pool(processes=thread_count) as pool:

-
-               losses = []
+               batches = pool.starmap(batcher, [(x_train, y_train)] * pop_size)

-
+               eval_params = [
+                   (
+                       x_b, y_b, None, model_type, weight_pop[j], act_pop[j],
+                       activation_potentiations[j], auto_normalization, None, cuda
+                   )
+                   for j, (x_b, y_b) in enumerate(batches)
+               ]

-               if
-
-
-
-
-                       y_pred_batch=models[f][get_preds_softmax()])
-
-               losses.append(train_loss)
+               if (model_type == 'PLAN' and weight_evolve is False) or (model_type == 'PLAN' and fit_start is True and i == 0):
+                   fit_params = [
+                       (x_b, y_b, act_pop[j], weight_pop[j], auto_normalization, cuda, dtype)
+                       for j, (x_b, y_b) in enumerate(batches)
+                   ]

-
-
+                   if start_this_act is not None and i == 0:
+                       w_copy = copy.deepcopy(weight_pop[0])
+
+                   W = pool.starmap(plan_fit, fit_params)
+                   new_weights = W
+
+                   eval_params = [
+                       (*ep[:4], w, *ep[5:])
+                       for ep, w in zip(eval_params, new_weights)
+                   ]
+
+                   if start_this_act is not None and i == 0:
+                       eval_params[0] = (
+                           eval_params[0][0], eval_params[0][1], eval_params[0][2], eval_params[0][3],
+                           w_copy, eval_params[0][5], eval_params[0][6], eval_params[0][7],
+                           eval_params[0][8], eval_params[0][9]
+                       )
+
+               models = pool.starmap(evaluate, eval_params)
+
+               y_preds = [model[get_preds_softmax()] for model in models]
+               loss_params = [(eval_params[f][1], y_preds[f]) for f in range(pop_size)]
+               loss_func = categorical_crossentropy if loss == 'categorical_crossentropy' else binary_crossentropy
+               losses = pool.starmap(loss_func, loss_params)
+               fitness_params = [(models[f][get_acc()], losses[f], acc_impact, loss_impact) for f in range(pop_size)]
+               target_pop = pool.starmap(wals, fitness_params)
+
+               if cuda:
+                   target_pop = [fit.get() for fit in target_pop]

        best_idx = np.argmax(target_pop)
        best_fitness = target_pop[best_idx]
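The new branch replaces the old half-finished loop with a fan-out/fan-in pattern: every per-candidate step (batching, fitting, evaluation, loss, fitness) becomes a list of argument tuples handed to one shared `Pool` via `starmap`. A self-contained sketch of the same pattern, with a toy `evaluate` standing in for the library's real one:

    from multiprocessing import Pool, cpu_count
    import numpy as np

    def evaluate(x, y, w):
        """Toy stand-in: score one candidate weight vector on one batch."""
        return float(np.mean((x @ w > 0.5) == y))

    if __name__ == '__main__':
        rng = np.random.default_rng(0)
        pop_size = 8
        x, y = rng.random((64, 4)), rng.integers(0, 2, 64)
        weights = [rng.random(4) for _ in range(pop_size)]

        # Fan out: one argument tuple per population member...
        eval_params = [(x, y, weights[j]) for j in range(pop_size)]

        # ...fan in: starmap unpacks each tuple into evaluate(x, y, w).
        with Pool(processes=cpu_count()) as pool:
            scores = pool.starmap(evaluate, eval_params)

        print(int(np.argmax(scores)), max(scores))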
@@ -395,25 +425,26 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
        best_loss = losses[best_idx]
        best_acc = models[best_idx][get_acc()]

-       x_train_batch =
-       y_train_batch =
+       x_train_batch = eval_params[best_idx][0]
+       y_train_batch = eval_params[best_idx][1]

-       if isinstance(
-           final_activations =
-       elif isinstance(
-           final_activations =
+       if isinstance(eval_params[best_idx][5], list) and model_type == 'PLAN':
+           final_activations = eval_params[best_idx][5].copy()
+       elif isinstance(eval_params[best_idx][5], str):
+           final_activations = eval_params[best_idx][5]
        else:
-           final_activations = copy.deepcopy(
+           final_activations = copy.deepcopy(eval_params[best_idx][5])

        if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

-       losses = []
-
        if batch_size == 1:
            postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
            postfix_dict[f"{data} Loss"] = format_number(best_loss)
            progress.set_postfix(postfix_dict)

+           if show_current_activations:
+               print(f", Current Activations={final_activations}", end='')
+
            progress.update(1)

        else:
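The magic indices here (`[0]`, `[1]`, `[5]`) refer back to the ten-element tuples built in the parallel branch: `(x_b, y_b, None, model_type, weight_pop[j], act_pop[j], activation_potentiations[j], auto_normalization, None, cuda)`. As a readability sketch only, a `NamedTuple` with assumed field names (mine, not the library's) would let the same code say `.activations` instead of `[5]`:

    from typing import Any, NamedTuple

    class EvalParams(NamedTuple):
        # Field names are illustrative; the order mirrors the tuples built above.
        x_batch: Any
        y_batch: Any
        unused_a: Any          # the first None slot
        model_type: str
        weights: Any           # index 4, swapped in after plan_fit
        activations: Any       # index 5, read back as final_activations
        activation_potentiations: Any
        auto_normalization: bool
        unused_b: Any          # the second None slot
        cuda: bool

    # eval_params[best_idx][5] would then read as:
    # eval_params[best_idx].activations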
@@ -474,27 +505,6 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
            if show_current_activations:
                print(f", Current Activations={final_activations}", end='')

-           # Update visualizations during training
-           if show_history:
-               gen_list = range(1, len(best_acc_per_gen_list) + 2)
-               update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
-                                                best_acc_per_gen_list + [best_acc], x_train, final_activations)
-
-           if neurons_history:
-               viz_objects['neurons']['artists'] = (
-                   update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
-                                                     viz_objects['neurons']['row'], viz_objects['neurons']['col'],
-                                                     y_train[0], viz_objects['neurons']['artists'],
-                                                     data=data, fig1=viz_objects['neurons']['fig'],
-                                                     acc=best_acc, loss=train_loss)
-               )
-
-           if neural_web_history:
-               art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
-                                                        G=viz_objects['web']['G'], return_objs=True)
-               art5_list = [art5_1] + [art5_2] + list(art5_3.values())
-               viz_objects['web']['artists'].append(art5_list)
-
            # Check target accuracy
            if target_acc is not None and best_acc >= target_acc:
                progress.close()
@@ -563,9 +573,31 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size

                return model

-
+
        progress.update(1)

+       # Update visualizations during training
+       if show_history:
+           gen_list = range(1, len(best_acc_per_gen_list) + 2)
+           update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+                                            best_acc_per_gen_list + [best_acc], x_train, final_activations)
+
+       if neurons_history:
+           viz_objects['neurons']['artists'] = (
+               update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
+                                                 viz_objects['neurons']['row'], viz_objects['neurons']['col'],
+                                                 y_train[0], viz_objects['neurons']['artists'],
+                                                 data=data, fig1=viz_objects['neurons']['fig'],
+                                                 acc=best_acc, loss=train_loss)
+           )
+
+       if neural_web_history:
+           art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
+                                                    G=viz_objects['web']['G'], return_objs=True)
+           art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+           viz_objects['web']['artists'].append(art5_list)
+
+
    if decision_boundary_history:
        if i == 0:
            fig, ax = create_decision_boundary_hist()
@@ -576,7 +608,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
    train_model = evaluate(x_train, y_train, W=best_weight, activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)

    if loss == 'categorical_crossentropy':
-       train_loss = categorical_crossentropy(y_true_batch=y_train,
+       train_loss = categorical_crossentropy(y_true_batch=y_train,
                                              y_pred_batch=train_model[get_preds_softmax()])
    else:
        train_loss = binary_crossentropy(y_true_batch=y_train,
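For reference, the two losses selected by the `loss` flag compute the standard cross-entropies. A minimal NumPy sketch of both (illustrative only; the library's own loss_functions module may clip or reduce differently):

    import numpy as np

    def categorical_crossentropy(y_true_batch, y_pred_batch, eps=1e-12):
        # Mean over the batch of -sum_k y_k * log(p_k) for one-hot targets.
        p = np.clip(y_pred_batch, eps, 1.0)
        return float(np.mean(-np.sum(y_true_batch * np.log(p), axis=1)))

    def binary_crossentropy(y_true_batch, y_pred_batch, eps=1e-12):
        # Mean of -[y*log(p) + (1-y)*log(1-p)] over all elements.
        p = np.clip(y_pred_batch, eps, 1.0 - eps)
        return float(np.mean(-(y_true_batch * np.log(p) + (1 - y_true_batch) * np.log(1 - p))))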
{pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.54a3
-Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
+Version: 5.55
+Summary: PyereualJetwork is a GPU-accelerated + Parallel Threading Supported machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classification,potentiation learning artificial neural networks,NEAT,genetic algorithms,reinforcement learning,neural networks
@@ -61,7 +61,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs

 ABOUT PYERUALJETWORK:

-PyereualJetwork is a large wide GPU-accelerated machine learning library in Python designed for professionals and researchers.
+PyereualJetwork is a large wide GPU-accelerated + Parallel Threading Supported machine learning library in Python designed for professionals and researchers.
 It features PLAN, MLP Deep Learning and PTNN training, as well as ENE (Eugenic NeuroEvolution) for genetic optimization,
 which can also be applied to genetic algorithms or Reinforcement Learning (RL) problems.
 The library includes functions for data pre-processing, visualizations, model saving and loading, prediction and evaluation,
{pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=j2PJ8ap2XNId9eE4qknm8J4A7HrmCfw_r499HGMYSI4,3087
 pyerualjetwork/ene.py,sha256=luTvspHRTose6s3uRas40pNXyKoxU9siaHiMBNI5yoc,42136
 pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
 pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
 pyerualjetwork/issue_solver.py,sha256=uay_9XK6xWnLmK2P_BeyDQlyNXzg_zYffnXYd228wZk,4102
 pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
 pyerualjetwork/model_ops.py,sha256=WaP1XwKqXMfZl4Yop8a1Bg0xtmLYgap9JFOWHaLr7S4,25143
-pyerualjetwork/nn.py,sha256=
+pyerualjetwork/nn.py,sha256=nA26datlq7QMPm9BgpneivLlV2xHKzAMK4toYZeBChA,41409
 pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
 pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
@@ -21,7 +21,7 @@ pyerualjetwork/cuda/data_ops.py,sha256=BEXh4M7BWXaTpYlVS9D2i3CGgOmL5131vy7FZyuTQ
 pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
 pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
 pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
-pyerualjetwork-5.
-pyerualjetwork-5.
-pyerualjetwork-5.
-pyerualjetwork-5.
+pyerualjetwork-5.55.dist-info/METADATA,sha256=Y_ya6IN7QEh2EAPa34LalZccl-Gsiesh2JK-p3V28WE,8050
+pyerualjetwork-5.55.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.55.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.55.dist-info/RECORD,,
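Each RECORD row follows the wheel spec (PEP 376/427): `path,sha256=<urlsafe-base64 digest without padding>,<size in bytes>`, with the RECORD file itself listed with empty hash and size, as in the last row above. A small sketch for recomputing one entry from an unpacked wheel:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path):
        """Build a wheel RECORD row: path, urlsafe-b64 sha256 (no '='), size."""
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
        return f"{path},sha256={digest.decode()},{len(data)}"

    # e.g. record_entry('pyerualjetwork/__init__.py') should reproduce the
    # first row of the 5.55 RECORD shown above.
    print(record_entry('pyerualjetwork/__init__.py'))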
{pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.54a3.dist-info → pyerualjetwork-5.55.dist-info}/top_level.txt
File without changes