pyerualjetwork-5.53-py3-none-any.whl → pyerualjetwork-5.54a1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- pyerualjetwork/__init__.py +2 -2
- pyerualjetwork/nn.py +194 -135
- {pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/METADATA +1 -1
- {pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/RECORD +6 -6
- {pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """

-__version__ = "5.53"
+__version__ = "5.54a1"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
@@ -74,7 +74,7 @@ if missing_modules:
         f"Missing modules detected: {', '.join(missing_modules)}\n"
         "Please run the following command to install the missing packages:\n\n"
         f"    pip install {' '.join(missing_modules)}\n\n"
-        "NOTE: needed numpy version --> numpy==1.26.4 and if you have cuda toolkit version 12 you need to install with pip --> pip install cupy-cuda12x. If you have not cuda gpu or toolkit and you want a CPU training doesnt matter. You need to install random cupy version. This is NECESSARY module."
+        "NOTE: needed numpy version --> numpy==1.26.4 and if you have cuda toolkit version 12 you need to install with pip --> pip install cupy-cuda12x. If you have not cuda gpu or toolkit and you want to use a CPU for training doesnt matter. You need to install random cupy version. This is NECESSARY module."
         "For more information, visit the PyerualJetwork GitHub README.md file:\n"
         "https://github.com/HCB06/PyerualJetwork/blob/main/README.md"

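The NOTE string above is part of a message printed by an import-time dependency check (the `if missing_modules:` block named in the hunk header). The detection logic itself is not shown in this diff; the sketch below is a minimal stand-alone illustration of the usual `importlib.util.find_spec` pattern such a check relies on, with an assumed module list, not the library's actual code.

```python
# Minimal sketch of an import-time dependency probe (illustrative only;
# the module list here is an assumption, not pyerualjetwork's actual list).
from importlib.util import find_spec

required = ["numpy", "cupy", "tqdm"]

# find_spec returns None when a module cannot be located, without importing it.
missing_modules = [m for m in required if find_spec(m) is None]

if missing_modules:
    print(
        f"Missing modules detected: {', '.join(missing_modules)}\n"
        "Please run the following command to install the missing packages:\n\n"
        f"    pip install {' '.join(missing_modules)}"
    )
```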
pyerualjetwork/nn.py
CHANGED
@@ -51,6 +51,8 @@ import numpy as np
 import cupy as cp
 import copy
 import random
+import multiprocessing
+from multiprocessing import Pool, cpu_count

 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
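The two new imports back the parallel evaluation path added to `learn()` below. `multiprocessing.Pool.map` distributes a top-level function over picklable argument tuples, with one worker process per CPU core when `processes=cpu_count()`. A minimal self-contained sketch of that pattern follows; the `score` worker here is hypothetical, not pyerualjetwork's `evaluate`.

```python
# Sketch of the Pool pattern these imports enable (hypothetical worker;
# pyerualjetwork maps its own `evaluate` over per-candidate parameter tuples).
from multiprocessing import Pool, cpu_count

def score(args):
    # Workers must be top-level functions and their arguments picklable.
    x, weight = args
    return x * weight

if __name__ == "__main__":  # required on spawn-based platforms (Windows, macOS)
    params = [(i, 0.5) for i in range(8)]  # one tuple per population member
    with Pool(processes=cpu_count()) as pool:
        results = pool.map(score, params)  # blocks until every worker returns
    print(results)
```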
@@ -125,7 +127,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
           weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
           neurons_history=False, early_stop=False, show_history=False, target_loss=None,
           interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
-          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], decision_boundary_history=False, cuda=False, dtype=np.float32):
+          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], parallel_training=True, decision_boundary_history=False, cuda=False, dtype=np.float32):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation(or activation function) & weight values for the labeled classificaiton dataset.
@@ -191,6 +193,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
    :param decision_boundary_history: (bool, optional): At the end of the training, the decision boundary history is shown in animation form. Note: If you are sure of your memory, set it to True. Default: False
    :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP or PTNN. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
    :param dtype: (numpy.dtype): Data type for the Weight matrices. np.float32 by default. Example: np.float64 or np.float16.
+   :param parallel_training: (bool, optional): Parallel training process ? (automaticly selects maximum thread count of your system) Default = True.
    :param cuda: (bool, optional): CUDA GPU acceleration ? Default = False.

    Returns:
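A hedged usage sketch of the new flag: only `gen`, `pop_size`, and `parallel_training` are taken from the signature above; the toy data and the `ene.evolver` wiring are assumptions for illustration, not canonical usage.

```python
# Hypothetical call sketch. Only gen, pop_size and the new parallel_training
# flag come from the signature in this diff; everything else is assumed.
import numpy as np
from pyerualjetwork import nn, ene

x_train = np.random.rand(120, 4).astype(np.float32)   # toy features
y_train = np.eye(3)[np.random.randint(0, 3, 120)]     # toy one-hot labels

model = nn.learn(x_train, y_train,
                 optimizer=ene.evolver,   # assumed: the library's ENE optimizer
                 gen=5, pop_size=20,
                 parallel_training=True)  # new in 5.54a1: evaluates the whole
                                          # population in a process pool
```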
@@ -359,156 +362,212 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
         progress.last_print_n = 0
         progress.update(0)

-        for j in range(pop_size):
-
-            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+        if parallel_training:

-            if fit_start is True and i == 0:
-                if start_this_act is not None and j == 0:
-                    pass
-                else:
-
-                    act_pop[j] = activations[j]
-                    W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
-                    weight_pop[j] = W
+            params = [
+                (*batcher(x_train, y_train), None, model_type, weight_pop[j], act_pop[j], activation_potentiations[j], auto_normalization, None, cuda)
+                for j in range(pop_size)
+            ]
+
+            with Pool(processes=cpu_count()) as pool:
+                models = pool.map(evaluate, params)

+            target_pop = []
+            losses = []

-            if weight_evolve is False:
-                weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
+            for f in range(pop_size):

+                if loss == 'categorical_crossentropy':
+                    train_loss = categorical_crossentropy(y_true_batch=params[f][1],
+                                                          y_pred_batch=models[f][get_preds_softmax()])
+                else:
+                    train_loss = binary_crossentropy(y_true_batch=params[f][1],
+                                                     y_pred_batch=models[f][get_preds_softmax()])
+
+                losses.append(train_loss)

-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
-            acc = model[get_acc()]
+                fitness = wals(models[f][get_acc()], train_loss, acc_impact, loss_impact)
+                target_pop.append(fitness if not cuda else fitness.get())

-            if loss == 'categorical_crossentropy':
-                train_loss = categorical_crossentropy(y_true_batch=y_train_batch,
-                                                      y_pred_batch=model[get_preds_softmax()])
+            best_idx = np.argmax(target_pop)
+            best_fitness = target_pop[best_idx]
+            best_weight = models[best_idx][0]
+            best_loss = losses[best_idx]
+            best_acc = models[best_idx][get_acc()]
+
+            x_train_batch = params[best_idx][0]
+            y_train_batch = params[best_idx][1]
+
+            if isinstance(params[best_idx][5], list) and model_type == 'PLAN':
+                final_activations = params[best_idx][5].copy()
+            elif isinstance(params[best_idx][5], str):
+                final_activations = params[best_idx][5]
             else:
-                train_loss = binary_crossentropy(y_true_batch=y_train_batch,
-                                                 y_pred_batch=model[get_preds_softmax()])
+                final_activations = copy.deepcopy(params[best_idx][5])
+
+            if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

-            fitness = wals(acc, train_loss, acc_impact, loss_impact)
-            target_pop.append(fitness if not cuda else fitness.get())
-
-            if fitness >= best_fitness:
-
-                best_fitness = fitness
-                best_acc = acc
-                best_loss = train_loss
-                best_weight = np.copy(weight_pop[j]) if model_type == 'PLAN' else copy.deepcopy(weight_pop[j])
-                best_model = model
-
-                if isinstance(act_pop[j], list) and model_type == 'PLAN':
-                    final_activations = act_pop[j].copy()
-                elif isinstance(act_pop[j], str):
-                    final_activations = act_pop[j]
-                else:
-                    final_activations = copy.deepcopy(act_pop[j])
-
-                if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
-
-                if batch_size == 1:
-                    postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
-                    postfix_dict[f"{data} Loss"] = format_number(train_loss)
-                    progress.set_postfix(postfix_dict)
-
-                if show_current_activations:
-                    print(f", Current Activations={final_activations}", end='')
-
-                # Update visualizations during training
-                if show_history:
-                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                    update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
-                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
-
-                if neurons_history:
-                    viz_objects['neurons']['artists'] = (
-                        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
-                                                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
-                                                          y_train[0], viz_objects['neurons']['artists'],
-                                                          data=data, fig1=viz_objects['neurons']['fig'],
-                                                          acc=best_acc, loss=train_loss)
-                    )
-
-                if neural_web_history:
-                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
-                                                             G=viz_objects['web']['G'], return_objs=True)
-                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
-                    viz_objects['web']['artists'].append(art5_list)
-
-                # Check target accuracy
-                if target_acc is not None and best_acc >= target_acc:
-                    progress.close()
-                    train_model = evaluate(x_train, y_train, W=best_weight,
-                                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
-                    if loss == 'categorical_crossentropy':
-                        train_loss = categorical_crossentropy(y_true_batch=y_train,
-                                                              y_pred_batch=train_model[get_preds_softmax()])
-                    else:
-                        train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                         y_pred_batch=train_model[get_preds_softmax()])
-
-                    print('\nActivations: ', final_activations)
-                    print('Activation Potentiation: ', activation_potentiation)
-                    print('Train Accuracy:', train_model[get_acc()])
-                    print('Train Loss: ', train_loss, '\n')
-                    print('Model Type:', model_type)
-
-                    if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+            losses = []

-                    display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
-                                                       best_loss, y_train, interval)
-
-                    model = template_model._replace(weights=best_weight,
-                                                    predictions=best_model[get_preds()],
-                                                    accuracy=best_acc,
-                                                    activations=final_activations,
-                                                    softmax_predictions=best_model[get_preds_softmax()],
-                                                    model_type=model_type,
-                                                    activation_potentiation=activation_potentiation)
-
-                    return model
-
-                # Check target loss
-                if target_loss is not None and best_loss <= target_loss:
-                    progress.close()
-                    train_model = evaluate(x_train, y_train, W=best_weight,
-                                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
-
-                    if loss == 'categorical_crossentropy':
-                        train_loss = categorical_crossentropy(y_true_batch=y_train,
-                                                              y_pred_batch=train_model[get_preds_softmax()])
-                    else:
-                        train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                         y_pred_batch=train_model[get_preds_softmax()])
+            if batch_size == 1:
+                postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
+                postfix_dict[f"{data} Loss"] = format_number(best_loss)
+                progress.set_postfix(postfix_dict)

-                    print('\nActivations: ', final_activations)
-                    print('Activation Potentiation: ', activation_potentiation)
-                    print('Train Accuracy:', train_model[get_acc()])
-                    print('Train Loss: ', train_loss, '\n')
-                    print('Model Type:', model_type)
+            progress.update(1)

-                    if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+        else:

-                    # Display final visualizations
-                    display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
-                                                       train_loss, y_train, interval)
-
-                    model = template_model._replace(weights=best_weight,
-                                                    predictions=best_model[get_preds()],
-                                                    accuracy=best_acc,
-                                                    activations=final_activations,
-                                                    softmax_predictions=best_model[get_preds_softmax()],
-                                                    model_type=model_type,
-                                                    activation_potentiation=activation_potentiation)
-
-                    return model
+            for j in range(pop_size):
+
+                x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)

+                if fit_start is True and i == 0:
+                    if start_this_act is not None and j == 0:
+                        pass
+                    else:

-            progress.update(1)
+                        act_pop[j] = activations[j]
+                        W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
+                        weight_pop[j] = W
+
+
+                if weight_evolve is False:
+                    weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
+
+
+                model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
+                acc = model[get_acc()]
+
+                if loss == 'categorical_crossentropy':
+                    train_loss = categorical_crossentropy(y_true_batch=y_train_batch,
+                                                          y_pred_batch=model[get_preds_softmax()])
+                else:
+                    train_loss = binary_crossentropy(y_true_batch=y_train_batch,
+                                                     y_pred_batch=model[get_preds_softmax()])
+
+                fitness = wals(acc, train_loss, acc_impact, loss_impact)
+                target_pop.append(fitness if not cuda else fitness.get())
+
+                if fitness >= best_fitness:
+
+                    best_fitness = fitness
+                    best_acc = acc
+                    best_loss = train_loss
+                    best_weight = np.copy(weight_pop[j]) if model_type == 'PLAN' else copy.deepcopy(weight_pop[j])
+                    best_model = model
+
+                    if isinstance(act_pop[j], list) and model_type == 'PLAN':
+                        final_activations = act_pop[j].copy()
+                    elif isinstance(act_pop[j], str):
+                        final_activations = act_pop[j]
+                    else:
+                        final_activations = copy.deepcopy(act_pop[j])
+
+                    if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+
+                    if batch_size == 1:
+                        postfix_dict[f"{data} Accuracy"] = format_number(best_acc)
+                        postfix_dict[f"{data} Loss"] = format_number(train_loss)
+                        progress.set_postfix(postfix_dict)
+
+                    if show_current_activations:
+                        print(f", Current Activations={final_activations}", end='')
+
+                    # Update visualizations during training
+                    if show_history:
+                        gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                        update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+                                                         best_acc_per_gen_list + [best_acc], x_train, final_activations)
+
+                    if neurons_history:
+                        viz_objects['neurons']['artists'] = (
+                            update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
+                                                              viz_objects['neurons']['row'], viz_objects['neurons']['col'],
+                                                              y_train[0], viz_objects['neurons']['artists'],
+                                                              data=data, fig1=viz_objects['neurons']['fig'],
+                                                              acc=best_acc, loss=train_loss)
+                        )
+
+                    if neural_web_history:
+                        art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
+                                                                 G=viz_objects['web']['G'], return_objs=True)
+                        art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+                        viz_objects['web']['artists'].append(art5_list)
+
+                    # Check target accuracy
+                    if target_acc is not None and best_acc >= target_acc:
+                        progress.close()
+                        train_model = evaluate(x_train, y_train, W=best_weight,
+                                               activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
+                        if loss == 'categorical_crossentropy':
+                            train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                                  y_pred_batch=train_model[get_preds_softmax()])
+                        else:
+                            train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                             y_pred_batch=train_model[get_preds_softmax()])
+
+                        print('\nActivations: ', final_activations)
+                        print('Activation Potentiation: ', activation_potentiation)
+                        print('Train Accuracy:', train_model[get_acc()])
+                        print('Train Loss: ', train_loss, '\n')
+                        print('Model Type:', model_type)
+
+                        if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
+                        display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                           best_loss, y_train, interval)
+
+                        model = template_model._replace(weights=best_weight,
+                                                        predictions=best_model[get_preds()],
+                                                        accuracy=best_acc,
+                                                        activations=final_activations,
+                                                        softmax_predictions=best_model[get_preds_softmax()],
+                                                        model_type=model_type,
+                                                        activation_potentiation=activation_potentiation)
+
+                        return model
+
+                    # Check target loss
+                    if target_loss is not None and best_loss <= target_loss:
+                        progress.close()
+                        train_model = evaluate(x_train, y_train, W=best_weight,
+                                               activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
+
+                        if loss == 'categorical_crossentropy':
+                            train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                                  y_pred_batch=train_model[get_preds_softmax()])
+                        else:
+                            train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                             y_pred_batch=train_model[get_preds_softmax()])
+
+                        print('\nActivations: ', final_activations)
+                        print('Activation Potentiation: ', activation_potentiation)
+                        print('Train Accuracy:', train_model[get_acc()])
+                        print('Train Loss: ', train_loss, '\n')
+                        print('Model Type:', model_type)
+
+                        if decision_boundary_history: display_decision_boundary_history(fig, artist, interval=interval)
+
+                        # Display final visualizations
+                        display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                           train_loss, y_train, interval)
+
+                        model = template_model._replace(weights=best_weight,
+                                                        predictions=best_model[get_preds()],
+                                                        accuracy=best_acc,
+                                                        activations=final_activations,
+                                                        softmax_predictions=best_model[get_preds_softmax()],
+                                                        model_type=model_type,
+                                                        activation_potentiation=activation_potentiation)
+
+                        return model
+
+
+                progress.update(1)

         if decision_boundary_history:
-            if i == 0:
+            if i == 0:
                 fig, ax = create_decision_boundary_hist()
                 artist = []
             artist = plot_decision_boundary(x_train_batch, y_train_batch, activations=act_pop[np.argmax(target_pop)], W=weight_pop[np.argmax(target_pop)], cuda=cuda, model_type=model_type, ax=ax, artist=artist)
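The structural change in this hunk: the new parallel path evaluates every candidate up front via Pool.map and then picks the winner once with np.argmax over the fitness list, whereas the sequential path keeps a running best inside the loop. A small hedged sketch of that select-after-evaluation pattern follows; the scoring function is a stand-in, since the real wals weighting is not shown in this diff.

```python
# Sketch of post-hoc winner selection over a population (illustrative;
# `score` is a hypothetical stand-in for pyerualjetwork's wals fitness).
import numpy as np

def score(acc, loss, acc_impact=0.9, loss_impact=0.1):
    # Assumed shape of a weighted accuracy/loss fitness: higher is better.
    return acc * acc_impact + (1.0 - loss) * loss_impact

accs = [0.71, 0.88, 0.80]
losses = [0.60, 0.35, 0.41]

target_pop = [score(a, l) for a, l in zip(accs, losses)]
best_idx = int(np.argmax(target_pop))  # winner chosen after all evaluations
print(best_idx, target_pop[best_idx], losses[best_idx])
```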
{pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.53
+Version: 5.54a1
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=qNREYfQ5WH5GL-EjJTNP9z5vuBEuXaZRfljsH4eczYM,3089
 pyerualjetwork/ene.py,sha256=luTvspHRTose6s3uRas40pNXyKoxU9siaHiMBNI5yoc,42136
 pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
 pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
 pyerualjetwork/issue_solver.py,sha256=uay_9XK6xWnLmK2P_BeyDQlyNXzg_zYffnXYd228wZk,4102
 pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
 pyerualjetwork/model_ops.py,sha256=WaP1XwKqXMfZl4Yop8a1Bg0xtmLYgap9JFOWHaLr7S4,25143
-pyerualjetwork/nn.py,sha256=
+pyerualjetwork/nn.py,sha256=PhSbus7rPkP_abgipBOpPvWKItjILT-zzz2SVq7tMUQ,39846
 pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
 pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
@@ -21,7 +21,7 @@ pyerualjetwork/cuda/data_ops.py,sha256=BEXh4M7BWXaTpYlVS9D2i3CGgOmL5131vy7FZyuTQ
 pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
 pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
 pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
-pyerualjetwork-5.53.dist-info/METADATA,sha256=
-pyerualjetwork-5.53.dist-info/WHEEL,sha256=
-pyerualjetwork-5.53.dist-info/top_level.txt,sha256=
-pyerualjetwork-5.53.dist-info/RECORD,,
+pyerualjetwork-5.54a1.dist-info/METADATA,sha256=ZlQDyAE3Tl0_VhqOycWrFwKhTbRjqMp5PbCJh9zyzT4,7990
+pyerualjetwork-5.54a1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.54a1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.54a1.dist-info/RECORD,,
{pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.53.dist-info → pyerualjetwork-5.54a1.dist-info}/top_level.txt
File without changes