pyerualjetwork-4.1.8b7-py3-none-any.whl → pyerualjetwork-4.1.8b8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/plan.py +21 -57
- pyerualjetwork/plan_cuda.py +20 -34
- {pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/RECORD +7 -7
- {pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/top_level.txt +0 -0
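The hunks below are reconstructed from the registry's diff viewer. If you want to reproduce a comparison like this locally, here is a minimal sketch using only the standard library; the wheel filenames are assumed to be local downloads from PyPI, and a wheel is just a zip archive:

```python
# Minimal sketch: diff the changed modules across the two wheels locally.
import difflib
import zipfile

OLD = "pyerualjetwork-4.1.8b7-py3-none-any.whl"
NEW = "pyerualjetwork-4.1.8b8-py3-none-any.whl"

def read_lines(wheel, member):
    """Read one archive member and split it into lines for difflib."""
    with zipfile.ZipFile(wheel) as zf:
        return zf.read(member).decode("utf-8").splitlines(keepends=True)

for member in ("pyerualjetwork/__init__.py",
               "pyerualjetwork/plan.py",
               "pyerualjetwork/plan_cuda.py"):
    hunks = difflib.unified_diff(read_lines(OLD, member), read_lines(NEW, member),
                                 fromfile=member, tofile=member)
    print("".join(hunks))
```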
pyerualjetwork/__init__.py
CHANGED

```diff
@@ -48,7 +48,7 @@ for package_name in package_names:
 
 print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.8b7"
+__version__ = "4.1.8b8"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
```
pyerualjetwork/plan.py
CHANGED

```diff
@@ -24,6 +24,7 @@ from .loss_functions import binary_crossentropy, categorical_crossentropy
 from .activation_functions import apply_activation, Softmax, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
+from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
     update_neural_web_for_fit,
```
```diff
@@ -261,45 +262,30 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     # Pre-checks
 
     x_train = x_train.astype(dtype, copy=False)
-
-    if len(y_train[0]) < 256:
-        if y_train.dtype != np.uint8:
-            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
-    elif len(y_train[0]) <= 32767:
-        if y_train.dtype != np.uint16:
-            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_train.dtype != np.uint32:
-            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
-
-    if x_test is not None:
-        x_test = x_test.astype(dtype, copy=False)
-
-        if len(y_test[0]) < 256:
-            if y_test.dtype != np.uint8:
-                y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-        elif len(y_test[0]) <= 32767:
-            if y_test.dtype != np.uint16:
-                y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-        else:
-            if y_test.dtype != np.uint32:
-                y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
+    y_train = optimize_labels(y_train, cuda=False)
 
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
+        x_test = x_test.astype(dtype, copy=False)
+        y_test = optimize_labels(y_test, cuda=False)
         data = 'Test'
 
     # Filter activation functions
-    if only_this
+    if only_this is not None:
         activation_potentiation = only_this
-    if except_this
+    if except_this is not None:
         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
     if gen is None:
         gen = len(activation_potentiation)
 
+    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+
+    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
```
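The deleted dtype-selection block above is the best available picture of what the new `optimize_labels` helper in `memory_operations` does: pick the smallest unsigned integer dtype that fits the one-hot label width. A hypothetical reconstruction inferred from that removed code follows; the shipped helper may differ, particularly in how `cuda=True` is handled:

```python
# Hypothetical reconstruction of memory_operations.optimize_labels, inferred
# from the inline dtype-selection code this release deletes. Not the actual source.
import numpy as np

def optimize_labels(y, cuda=False):
    xp = np
    if cuda:
        import cupy as xp  # GPU path: build the array with CuPy instead of NumPy

    n_classes = len(y[0])          # width of the one-hot rows
    if n_classes < 256:
        target = xp.uint8
    elif n_classes <= 32767:
        target = xp.uint16
    else:
        target = xp.uint32

    y = xp.array(y, copy=False)
    return y if y.dtype == target else y.astype(target, copy=False)
```

One thing worth noting about the new guards: in `len(activation_potentiation) + 1 % 2 != 0`, Python evaluates `%` before `+`, so the expression reduces to `len(activation_potentiation) + 1 != 0`, which is true for every list. The intended parity check was presumably parenthesized:

```python
# Presumably intended form of the start_this_act guard (parenthesized):
if start_this_act is not None and (len(activation_potentiation) + 1) % 2 != 0:
    raise ValueError("Activation length must stay even when start_this is used.")
```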
```diff
@@ -336,34 +322,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     loss_list = []
     target_pop = []
 
-    for i in range(len(activation_potentiation)):
-
-        if i == 0 and start_this_act is not None:
-            act_pop[0] = activation_potentiation[i]
-
-        else:
-            act_pop.append(activation_potentiation[i])
-
-        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype)
-
-        if i == 0 and start_this_W is not None:
-            weight_pop[0] = W
-
-        else:
-            weight_pop.append(W)
-
-        if strategy == 'accuracy': target_pop.append(model[get_acc()])
-        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
-
-            if strategy == 'precision': target_pop.append(precision_score)
-            if strategy == 'recall': target_pop.append(recall_score)
-            if strategy == 'f1': target_pop.append(f1_score)
-
-        print(f"\rPre-Fit {i}/{len(activation_potentiation)}, {data} {strategy}: {target_pop[-1]}",end='')
-
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
```
```diff
@@ -373,16 +331,19 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         progress.n = 0
         progress.last_print_n = 0
         progress.update(0)
-
-        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
-        target_pop = []
 
         for j in range(len(activation_potentiation)):
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
+
+            if i == 0:
+                act_pop.append(activation_potentiation[j])
+                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop.append(W)
 
+            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
             acc = model[get_acc()]
+
             if strategy == 'accuracy': target_pop.append(acc)
 
             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
```
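Together with the deleted "Pre-Fit" pass above, this hunk amounts to one restructuring: the initial fit/evaluate sweep over all activations no longer runs as a separate pre-loop; generation 0 of the main loop now builds the population lazily, and, as the next hunk shows, the `optimizer` call moves from the top of each generation to after the whole generation has been scored. A toy sketch of the new control flow, with stand-in functions rather than the library's real signatures:

```python
# Toy illustration of the restructured search loop; fit/evaluate/optimizer are
# stand-ins for the library's functions, not their real signatures.
def evolve(activations, gen, fit, evaluate, optimizer):
    weight_pop, act_pop, target_pop = [], [], []
    for i in range(gen):
        for j, act in enumerate(activations):
            if i == 0:                       # generation 0 doubles as the old Pre-Fit pass
                act_pop.append(act)
                weight_pop.append(fit(act))
            target_pop.append(evaluate(weight_pop[j], act_pop[j]))
        # fitness is consumed only after the full generation is scored
        weight_pop, act_pop = optimizer(weight_pop, act_pop, i, target_pop)
        target_pop = []
    return weight_pop, act_pop

# Example run with trivial stand-ins:
best_w, best_a = evolve(
    ["linear", "tanh"], gen=3,
    fit=lambda act: {"act": act, "w": 0.0},
    evaluate=lambda w, act: 0.5,
    optimizer=lambda wp, ap, i, tp: (wp, ap),
)
```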
```diff
@@ -516,6 +477,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
 
+        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
+
         # Early stopping check
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
```
pyerualjetwork/plan_cuda.py
CHANGED

```diff
@@ -25,7 +25,7 @@ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
 from .activation_functions_cuda import apply_activation, Softmax, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
-from .memory_operations import transfer_to_gpu, transfer_to_cpu
+from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
 from .visualizations_cuda import (
     draw_neural_web,
     update_neural_web_for_fit,
```
```diff
@@ -277,12 +277,15 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     activation_potentiation = all_activations()
 
+    y_train = optimize_labels(y_train, cuda=True)
+
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
         data = 'Test'
+        y_test = optimize_labels(y_test, cuda=True)
 
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=dtype)
```
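Both call sites pass `cuda=True` here, presumably so the optimized label array is created on the GPU. The motivation for shrinking the label dtype at all is memory: a one-hot label matrix stored as float64 is eight times larger than the same matrix as uint8. A quick illustrative check in plain NumPy:

```python
# Illustrative memory comparison for one-hot labels (60,000 samples, 10 classes).
import numpy as np

labels = np.eye(10)[np.random.randint(0, 10, size=60_000)]  # float64 one-hot
print(labels.nbytes)                    # 4800000 bytes as float64
print(labels.astype(np.uint8).nbytes)   # 600000 bytes as uint8, 8x smaller
```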
```diff
@@ -305,14 +308,19 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     else:
         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
+    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+
     # Filter activation functions
-    if only_this
+    if only_this is not None:
         activation_potentiation = only_this
-    if except_this
+    if except_this is not None:
         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
     if gen is None:
         gen = len(activation_potentiation)
 
+    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
```
```diff
@@ -348,34 +356,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     loss_list = []
     target_pop = []
 
-    for i in range(len(activation_potentiation)):
-
-        if i == 0 and start_this_act is not None:
-            act_pop[0] = activation_potentiation[i]
-
-        else:
-            act_pop.append(activation_potentiation[i])
-
-        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype, memory=memory)
-        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype, memory=memory)
-
-        if i == 0 and start_this_W is not None:
-            weight_pop[0] = W
-
-        else:
-            weight_pop.append(W)
-
-        if strategy == 'accuracy': target_pop.append(model[get_acc()])
-        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
-
-            if strategy == 'precision': target_pop.append(precision_score)
-            if strategy == 'recall': target_pop.append(recall_score)
-            if strategy == 'f1': target_pop.append(f1_score)
-
-        print(f"\rPre-Fit {i}/{len(activation_potentiation)}, {data} {strategy}: {target_pop[-1]}",end='')
-
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
```
```diff
@@ -386,12 +366,15 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         progress.last_print_n = 0
         progress.update(0)
 
-        weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
-        target_pop = []
-
        for j in range(len(activation_potentiation)):
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+
+            if i == 0:
+                act_pop.append(activation_potentiation[j])
+                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop.append(W)
+
             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
 
             acc = model[get_acc()]
```
```diff
@@ -527,6 +510,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
+
+        weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
 
         # Early stopping check
         if early_stop == True and i > 0:
```
{pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.8b7
+Version: 4.1.8b8
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
```
{pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/RECORD
CHANGED

```diff
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=yLAnyJhLj2rr99TNrfahjlm_wXoBPhNtJZ_LwI0Wqt4,2177
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
+pyerualjetwork/plan.py,sha256=5znxbnGO-3aL_SRDMRNNrDPJs_6hgU8yGiFnwPy69EY,34320
+pyerualjetwork/plan_cuda.py,sha256=l90LFEraFEsryZnuqxd1C-d8TDIlMnb5mzgVzSOEP7w,36051
 pyerualjetwork/planeat.py,sha256=VtWtWndbKoFNYTWd1EsyKBV4Vp5U6cc7uWDgQ4WjHqo,40248
 pyerualjetwork/planeat_cuda.py,sha256=fSn28ZbxctPvBjpKgtv_uGwwUdTEXkBizy76mMlZYJ0,40237
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
 pyerualjetwork/visualizations_cuda.py,sha256=9qw46Y4bo67l0nVVF1FSNS8ksyzbIAJdaPDFOhN5J8Y,29188
-pyerualjetwork-4.1.8b7.dist-info/METADATA,sha256=
-pyerualjetwork-4.1.8b7.dist-info/WHEEL,sha256=
-pyerualjetwork-4.1.8b7.dist-info/top_level.txt,sha256=
-pyerualjetwork-4.1.8b7.dist-info/RECORD,,
+pyerualjetwork-4.1.8b8.dist-info/METADATA,sha256=XoD9Pa3QKwU_t6FSuY0bGNg-2p-J2xM0oagjSzs6a9g,7795
+pyerualjetwork-4.1.8b8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.8b8.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.8b8.dist-info/RECORD,,
```
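The new hashes above can be checked locally. RECORD rows follow the wheel format (PEP 427 / PEP 376): each row is `path,sha256=<digest>,size`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing `=` padding stripped. A sketch, assuming the 4.1.8b8 wheel has been downloaded:

```python
# Sketch: recompute one RECORD row from the wheel's contents and compare.
import base64
import hashlib
import zipfile

WHEEL = "pyerualjetwork-4.1.8b8-py3-none-any.whl"  # assumed local download

def record_row(wheel, member):
    with zipfile.ZipFile(wheel) as zf:
        data = zf.read(member)
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{member},sha256={digest.decode()},{len(data)}"

# Expected per the diff above:
# pyerualjetwork/plan.py,sha256=5znxbnGO-3aL_SRDMRNNrDPJs_6hgU8yGiFnwPy69EY,34320
print(record_row(WHEEL, "pyerualjetwork/plan.py"))
```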
{pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/WHEEL: file without changes
{pyerualjetwork-4.1.8b7.dist-info → pyerualjetwork-4.1.8b8.dist-info}/top_level.txt: file without changes