pyerualjetwork-5.54a2-py3-none-any.whl → pyerualjetwork-5.54a4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/nn.py +76 -28
- {pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/METADATA +1 -1
- {pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/RECORD +6 -6
- {pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """
 
-__version__ = "5.54a2"
+__version__ = "5.54a4"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
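The only change in __init__.py is the version bump. A minimal post-upgrade check, assuming the 5.54a4 wheel is installed in the active environment:

    import pyerualjetwork

    # __version__ and __update__ are module-level strings defined in pyerualjetwork/__init__.py
    print(pyerualjetwork.__version__)   # expected: 5.54a4
    print(pyerualjetwork.__update__)    # changelog and documentation links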
pyerualjetwork/nn.py
CHANGED
@@ -364,30 +364,56 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 
         if parallel_training:
 
-
-
-                for j in range(pop_size)
-            ]
+            eval_params = []
+            fit_params = []
 
             with Pool(processes=cpu_count()) as pool:
-                models = pool.starmap(evaluate, params)
 
-
-                losses = []
+                batches = pool.starmap(batcher, [(x_train, y_train)] * pop_size)
 
-
+                eval_params = [
+                    (
+                        x_b, y_b, None, model_type, weight_pop[j], act_pop[j],
+                        activation_potentiations[j], auto_normalization, None, cuda
+                    )
+                    for j, (x_b, y_b) in enumerate(batches)
+                ]
 
-                if
-
-
-
-
-                        y_pred_batch=models[f][get_preds_softmax()])
-
-                    losses.append(train_loss)
+                if (model_type == 'PLAN' and weight_evolve is False) or (model_type == 'PLAN' and fit_start is True and i == 0):
+                    fit_params = [
+                        (x_b, y_b, act_pop[j], weight_pop[j], auto_normalization, cuda, dtype)
+                        for j, (x_b, y_b) in enumerate(batches)
+                    ]
 
-
-
+                    if start_this_act is not None and i == 0:
+                        w_copy = copy.deepcopy(weight_pop[0])
+
+                    W = pool.starmap(plan_fit, fit_params)
+                    new_weights = W
+
+                    eval_params = [
+                        (*ep[:4], w, *ep[5:])
+                        for ep, w in zip(eval_params, new_weights)
+                    ]
+
+                    if start_this_act is not None and i == 0:
+                        eval_params[0] = (
+                            eval_params[0][0], eval_params[0][1], eval_params[0][2], eval_params[0][3],
+                            w_copy, eval_params[0][5], eval_params[0][6], eval_params[0][7],
+                            eval_params[0][8], eval_params[0][9]
+                        )
+
+                models = pool.starmap(evaluate, eval_params)
+
+                y_preds = [model[get_preds_softmax()] for model in models]
+                loss_params = [(eval_params[f][1], y_preds[f]) for f in range(pop_size)]
+                loss_func = categorical_crossentropy if loss == 'categorical_crossentropy' else binary_crossentropy
+                losses = pool.starmap(loss_func, loss_params)
+                fitness_params = [(models[f][get_acc()], losses[f], acc_impact, loss_impact) for f in range(pop_size)]
+                target_pop = pool.starmap(wals, fitness_params)
+
+                if cuda:
+                    target_pop = [fit.get() for fit in target_pop]
 
                 best_idx = np.argmax(target_pop)
                 best_fitness = target_pop[best_idx]
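The rewritten parallel_training branch dispatches the whole population through one multiprocessing pool: a batch is drawn per individual (batcher), one picklable argument tuple is built per individual, and Pool.starmap fans out evaluation, optional plan_fit weight fitting, the chosen crossentropy loss, and the wals fitness score. The sketch below reproduces only that dispatch pattern; toy_batcher, toy_evaluate and toy_fitness are hypothetical stand-ins, not the library's batcher, evaluate or wals:

    import numpy as np
    from multiprocessing import Pool, cpu_count

    def toy_batcher(x, y, batch_size=32):
        # draw one random batch per population member
        idx = np.random.choice(len(x), batch_size, replace=False)
        return x[idx], y[idx]

    def toy_evaluate(x_b, y_b, weights):
        # stand-in for evaluate(): linear model accuracy on the batch
        logits = x_b @ weights
        return float((logits.argmax(axis=1) == y_b.argmax(axis=1)).mean())

    def toy_fitness(acc, loss, acc_impact=0.9, loss_impact=0.1):
        # stand-in for wals(): weighted accuracy/loss score
        return acc * acc_impact - loss * loss_impact

    if __name__ == '__main__':
        rng = np.random.default_rng(0)
        x_train = rng.normal(size=(256, 8))
        y_train = np.eye(4)[rng.integers(0, 4, size=256)]
        pop_size = 6
        weight_pop = [rng.normal(size=(8, 4)) for _ in range(pop_size)]

        with Pool(processes=cpu_count()) as pool:
            # one batch per individual, drawn in parallel
            batches = pool.starmap(toy_batcher, [(x_train, y_train)] * pop_size)
            # one argument tuple per individual, then a single fan-out per stage
            eval_params = [(x_b, y_b, weight_pop[j]) for j, (x_b, y_b) in enumerate(batches)]
            accs = pool.starmap(toy_evaluate, eval_params)
            losses = [1.0 - a for a in accs]              # placeholder loss
            target_pop = pool.starmap(toy_fitness, zip(accs, losses))

        best_idx = int(np.argmax(target_pop))
        print(best_idx, accs[best_idx])

Building plain tuples of arrays, strings and flags keeps every argument picklable, which is what allows starmap to ship the work to worker processes.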
@@ -395,25 +421,47 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
                 best_loss = losses[best_idx]
                 best_acc = models[best_idx][get_acc()]
 
-                x_train_batch =
-                y_train_batch =
+                x_train_batch = eval_params[best_idx][0]
+                y_train_batch = eval_params[best_idx][1]
 
-                if isinstance(
-                    final_activations =
-                elif isinstance(
-                    final_activations =
+                if isinstance(eval_params[best_idx][5], list) and model_type == 'PLAN':
+                    final_activations = eval_params[best_idx][5].copy()
+                elif isinstance(eval_params[best_idx][5], str):
+                    final_activations = eval_params[best_idx][5]
                 else:
-                    final_activations = copy.deepcopy(
+                    final_activations = copy.deepcopy(eval_params[best_idx][5])
 
                 if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
-                losses = []
-
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
                     postfix_dict[f"{data} Loss"] = format_number(best_loss)
                     progress.set_postfix(postfix_dict)
 
+                if show_current_activations:
+                    print(f", Current Activations={final_activations}", end='')
+
+                # Update visualizations during training
+                if show_history:
+                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
+
+                if neurons_history:
+                    viz_objects['neurons']['artists'] = (
+                        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
+                                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
+                                                          y_train[0], viz_objects['neurons']['artists'],
+                                                          data=data, fig1=viz_objects['neurons']['fig'],
+                                                          acc=best_acc, loss=train_loss)
+                    )
+
+                if neural_web_history:
+                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
+                                                             G=viz_objects['web']['G'], return_objs=True)
+                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+                    viz_objects['web']['artists'].append(art5_list)
+
                 progress.update(1)
 
         else:
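After the parallel evaluation, the best individual's batch and activations are read back out of its eval_params tuple by position (indices 0, 1 and 5), and freshly fitted weights are spliced into position 4 with (*ep[:4], w, *ep[5:]). A standalone illustration of that splice, with placeholder strings standing in for the real arrays:

    # tuple layout mirrors the eval_params built above; the field labels in the
    # comment are inferred from the call sites, not the library's internal naming
    ep = ('x_b', 'y_b', None, 'PLAN', 'old_W', 'act', 'act_pot', True, None, False)
    #      0      1      2     3       4        5      6          7     8     9
    new_w = 'new_W'

    updated = (*ep[:4], new_w, *ep[5:])   # replaces only index 4 (the weights)
    assert updated[4] == 'new_W' and updated[5] == 'act' and len(updated) == len(ep)

    # downstream, the winning batch and activations are recovered by index:
    x_train_batch, y_train_batch, final_activations = updated[0], updated[1], updated[5]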
@@ -576,7 +624,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
     train_model = evaluate(x_train, y_train, W=best_weight, activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
 
     if loss == 'categorical_crossentropy':
-        train_loss = categorical_crossentropy(y_true_batch=y_train,
+        train_loss = categorical_crossentropy(y_true_batch=y_train,
                                               y_pred_batch=train_model[get_preds_softmax()])
     else:
         train_loss = binary_crossentropy(y_true_batch=y_train,
{pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.54a2
+Version: 5.54a4
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=d7tzdeWje0FkOPL_9la8YzHcPrBoROMM9kZgLt26X-w,3089
 pyerualjetwork/ene.py,sha256=luTvspHRTose6s3uRas40pNXyKoxU9siaHiMBNI5yoc,42136
 pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
 pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
 pyerualjetwork/issue_solver.py,sha256=uay_9XK6xWnLmK2P_BeyDQlyNXzg_zYffnXYd228wZk,4102
 pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
 pyerualjetwork/model_ops.py,sha256=WaP1XwKqXMfZl4Yop8a1Bg0xtmLYgap9JFOWHaLr7S4,25143
-pyerualjetwork/nn.py,sha256=
+pyerualjetwork/nn.py,sha256=EpYN3jCtYNfNCCPX0BN8JaL42JrkxLiopQkL6_j1Hio,42513
 pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
 pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
@@ -21,7 +21,7 @@ pyerualjetwork/cuda/data_ops.py,sha256=BEXh4M7BWXaTpYlVS9D2i3CGgOmL5131vy7FZyuTQ
 pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
 pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
 pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
-pyerualjetwork-5.54a2.dist-info/METADATA,sha256=
-pyerualjetwork-5.54a2.dist-info/WHEEL,sha256=
-pyerualjetwork-5.54a2.dist-info/top_level.txt,sha256=
-pyerualjetwork-5.54a2.dist-info/RECORD,,
+pyerualjetwork-5.54a4.dist-info/METADATA,sha256=lgXzJZWTOmS-29lcI2QENAxRR-zXkP4GTVt8VdvzG7A,7990
+pyerualjetwork-5.54a4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.54a4.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.54a4.dist-info/RECORD,,
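Each RECORD line has the form path,sha256=<digest>,<size-in-bytes>; per the wheel spec the digest is the urlsafe base64 encoding of the file's SHA-256 with trailing '=' padding stripped. A short sketch for checking an installed file against its entry (the site-packages path is illustrative and must be adjusted):

    import base64, hashlib, pathlib

    def record_digest(path):
        # sha256 of the file, urlsafe-base64 encoded, '=' padding removed
        digest = hashlib.sha256(pathlib.Path(path).read_bytes()).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

    # e.g. record_digest('.../site-packages/pyerualjetwork/nn.py') should return
    # 'EpYN3jCtYNfNCCPX0BN8JaL42JrkxLiopQkL6_j1Hio' for the 5.54a4 wheel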
{pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.54a2.dist-info → pyerualjetwork-5.54a4.dist-info}/top_level.txt
File without changes