pyerualjetwork 4.3.13.dev0__py3-none-any.whl → 4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +20 -0
- pyerualjetwork/data_operations_cuda.py +21 -0
- pyerualjetwork/fitness_functions.py +6 -1
- pyerualjetwork/plan.py +3 -3
- pyerualjetwork/plan_cuda.py +3 -3
- pyerualjetwork/planeat.py +12 -22
- pyerualjetwork/planeat_cuda.py +12 -22
- {pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/RECORD +12 -12
- {pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-__version__ = "4.3.13.dev0"
+__version__ = "4.5"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/data_operations.py
CHANGED
@@ -373,6 +373,26 @@ dtype=np.float32):
     return (Input / MaxAbs)
 
 
+def non_neg_normalization(
+    Input,
+    dtype=np.float32
+):
+    """
+    Normalizes the input data [0-1] range.
+
+    Args:
+        Input (cupy): Input data to be normalized.
+
+        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
+    Returns:
+        (cupy) Scaled input data after normalization.
+    """
+
+    MaxAbs = np.max(np.abs(Input.astype(dtype, copy=False)))
+    return (Input + MaxAbs) / (2 * MaxAbs)
+
+
 def find_closest_factors(a):
 
     root = int(math.sqrt(a))
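For orientation, a minimal sketch of what the added non_neg_normalization does: shift by the maximum absolute value, then rescale so the output lands in [0, 1]. The sample array below is illustrative, not taken from the package:

import numpy as np

def non_neg_normalization(Input, dtype=np.float32):
    # shift by the max absolute value, then rescale into [0, 1]
    MaxAbs = np.max(np.abs(Input.astype(dtype, copy=False)))
    return (Input + MaxAbs) / (2 * MaxAbs)

fitness = np.array([-2.0, 0.0, 1.0, 2.0], dtype=np.float32)
print(non_neg_normalization(fitness))  # [0.   0.5  0.75 1.  ]

Further down in this diff, planeat.evolver starts feeding its raw fitness array through this function, which maps possibly negative fitness values into [0, 1] before they are used.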
pyerualjetwork/data_operations_cuda.py
CHANGED
@@ -421,6 +421,27 @@ def normalization(
     MaxAbs = cp.max(cp.abs(Input.astype(dtype, copy=False)))
     return (Input / MaxAbs)
 
+
+def non_neg_normalization(
+    Input,
+    dtype=cp.float32
+):
+    """
+    Normalizes the input data [0-1] range.
+
+    Args:
+        Input (cupy): Input data to be normalized.
+
+        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+
+    Returns:
+        (cupy) Scaled input data after normalization.
+    """
+
+    MaxAbs = cp.max(cp.abs(Input.astype(dtype, copy=False)))
+    return (Input + MaxAbs) / (2 * MaxAbs)
+
+
 def find_closest_factors(a):
 
     root = int(math.sqrt(a))
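The CuPy variant is identical apart from the array module. A hedged usage sketch; the import path is assumed from the module layout shown in the RECORD below, and the sample values are illustrative:

import cupy as cp
from pyerualjetwork.data_operations_cuda import non_neg_normalization

fitness = cp.array([-2.0, 0.0, 1.0, 2.0], dtype=cp.float32)
print(cp.asnumpy(non_neg_normalization(fitness, dtype=cp.float32)))  # [0.   0.5  0.75 1.  ]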
pyerualjetwork/fitness_functions.py
CHANGED
@@ -1,3 +1,5 @@
+import numpy as np
+
 def wals(acc, loss, acc_impact, loss_impact):
     """
     The WALS(weighted accuracy-loss score) function calculates a weighted sum of accuracy and loss based on their respective impacts.
@@ -13,4 +15,7 @@ def wals(acc, loss, acc_impact, loss_impact):
     much the loss affects the final result compared to the accuracy impact
     :return: the weighted sum of accuracy and loss based on their respective impacts.
     """
-
+    loss += np.finfo(float).eps
+    loss_impact += np.finfo(float).eps
+
+    return (acc * acc_impact) + ((loss_impact / loss) * loss_impact)
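As a quick numeric check of the new body (values illustrative, not from the package): accuracy contributes linearly, loss contributes through loss_impact / loss so lower loss raises the score, and the epsilon terms only guard against division by zero:

import numpy as np

def wals(acc, loss, acc_impact, loss_impact):
    # epsilon keeps the division defined when loss (or loss_impact) is exactly 0
    loss += np.finfo(float).eps
    loss_impact += np.finfo(float).eps
    return (acc * acc_impact) + ((loss_impact / loss) * loss_impact)

print(wals(acc=0.95, loss=0.20, acc_impact=0.9, loss_impact=0.1))  # 0.855 + (0.1 / 0.2) * 0.1 = 0.905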
pyerualjetwork/plan.py
CHANGED
@@ -84,7 +84,7 @@ def fit(
 
 
 def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
-            weight_evolve=
+            weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
             neurons_history=False, early_stop=False, show_history=False, target_loss=None,
             interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
             start_this_act=None, start_this_W=None, dtype=np.float32):
@@ -120,7 +120,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
         interval=16.67)
     ```
 
-    weight_evolve (bool, optional): Activation combinations already optimizes by
+    weight_evolve (bool, optional): Activation combinations already optimizes by PLANEAT genetic search algorithm. Should the weight parameters also evolve or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
 
     loss (str, optional): options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
@@ -136,7 +136,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
 
     gen (int, optional): The generation count for genetic optimization.
 
-    batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the
+    batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents %8 of the train set. Default is 1. (%100 of train)
 
     pop_size (int, optional): Population size of each generation. Default: count of activation functions
 
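Putting the documented parameters together, a hedged usage sketch of the updated learner signature. The random dataset and the planeat-based optimizer wrapper are assumptions made for illustration, not part of this diff:

import numpy as np
from pyerualjetwork import plan, planeat

# placeholder one-hot dataset, only to make the call concrete
x_train = np.random.rand(200, 16).astype(np.float32)
y_train = np.eye(2)[np.random.randint(0, 2, 200)].astype(np.float32)

# assumed: the optimizer argument is a callable that forwards to planeat.evolver
genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args, **kwargs)

model = plan.learner(x_train, y_train, genetic_optimizer,
                     fit_start=True,
                     gen=5,
                     batch_size=0.08,     # each feedback batch samples 8% of the train set
                     weight_evolve=True)  # evolve weights rather than pure PLAN aggregation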
pyerualjetwork/plan_cuda.py
CHANGED
@@ -83,7 +83,7 @@ def fit(
 
 
 def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
-            weight_evolve=
+            weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
             neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
             interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
             start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
@@ -119,7 +119,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
         interval=16.67)
     ```
 
-    weight_evolve (bool, optional): Activation combinations already optimizes by
+    weight_evolve (bool, optional): Activation combinations already optimizes by PLANEAT genetic search algorithm. Should the weight parameters also evolve or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
 
     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
@@ -131,7 +131,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
 
     gen (int, optional): The generation count for genetic optimization.
 
-    batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire
+    batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents %8 of the train set. Default is 1. (%100 of train)
 
     acc_impact (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
 
pyerualjetwork/planeat.py
CHANGED
@@ -17,7 +17,7 @@ import random
 import math
 
 ### LIBRARY IMPORTS ###
-from .data_operations import normalization
+from .data_operations import normalization, non_neg_normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
 
@@ -291,7 +291,7 @@ def evolver(weights,
     bar_format = loading_bars()[0]
 
     if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50)
-    normalized_fitness =
+    normalized_fitness = non_neg_normalization(fitness, dtype=dtype)
 
     best_fitness = normalized_fitness[-1]
     epsilon = np.finfo(float).eps
@@ -708,28 +708,18 @@ def mutation(weight,
 
     if potential_weight_mutation > weight_mutate_prob:
 
-
-
-
-
-
-
-
-
-
-        for _ in range(max_threshold):
+        row_end, col_end = weight.shape
+        max_threshold = row_end * col_end
+        threshold = weight_mutate_threshold / (genome_fitness + epsilon)
+
+        n_mutations = min(int(threshold), max_threshold)
+
+        row_indices = np.random.randint(0, row_end, size=n_mutations)
+        col_indices = np.random.randint(0, col_end, size=n_mutations)
 
-
-            selected_col = int(random.uniform(start, col_end))
-
-            weight[selected_row, selected_col] = random.uniform(-1, 1)
-            new_threshold += threshold
-
-            if max_threshold > new_threshold:
-                pass
+        new_values = np.random.uniform(-1, 1, size=n_mutations)
 
-
-            break
+        weight[row_indices, col_indices] = new_values
 
     activation_mutate_prob = 1 - activation_mutate_prob
     potential_activation_mutation = random.uniform(0, 1)
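The rewritten mutation block replaces the old per-cell Python loop with one vectorized assignment: pick n_mutations random positions (fewer for fitter genomes) and overwrite them with fresh uniform values. A standalone sketch of that idea, with illustrative numbers standing in for the weight_mutate_threshold, genome_fitness and epsilon that come from the surrounding mutation() code:

import numpy as np

weight = np.random.uniform(-1, 1, size=(4, 6))   # stand-in genome weight matrix
weight_mutate_threshold = 16                     # illustrative values
genome_fitness = 0.5
epsilon = np.finfo(float).eps

row_end, col_end = weight.shape
max_threshold = row_end * col_end                    # never request more mutations than cells
threshold = weight_mutate_threshold / (genome_fitness + epsilon)
n_mutations = min(int(threshold), max_threshold)     # higher fitness -> fewer mutations

# choose n_mutations random cells (duplicates possible) and overwrite them in one shot
rows = np.random.randint(0, row_end, size=n_mutations)
cols = np.random.randint(0, col_end, size=n_mutations)
weight[rows, cols] = np.random.uniform(-1, 1, size=n_mutations)

planeat_cuda.py below applies the same change with cp.random in place of np.random.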
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -19,7 +19,7 @@ import math
 
 
 ### LIBRARY IMPORTS ###
-from .data_operations_cuda import normalization
+from .data_operations_cuda import normalization, non_neg_normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
 
@@ -292,7 +292,7 @@ def evolver(weights,
     bar_format = loading_bars()[0]
 
     if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50)
-    normalized_fitness =
+    normalized_fitness = non_neg_normalization(fitness, dtype=dtype)
 
     best_fitness = normalized_fitness[-1]
     epsilon = cp.finfo(float).eps
@@ -709,28 +709,18 @@ def mutation(weight,
 
     if potential_weight_mutation > weight_mutate_prob:
 
-
-
-
-
-
-
-
-
-
-        for _ in range(max_threshold):
+        row_end, col_end = weight.shape
+        max_threshold = row_end * col_end
+        threshold = weight_mutate_threshold / (genome_fitness + epsilon)
+
+        n_mutations = min(int(threshold), max_threshold)
+
+        row_indices = cp.random.randint(0, row_end, size=n_mutations)
+        col_indices = cp.random.randint(0, col_end, size=n_mutations)
 
-
-            selected_col = int(random.uniform(start, col_end))
-
-            weight[selected_row, selected_col] = random.uniform(-1, 1)
-            new_threshold += threshold
-
-            if max_threshold > new_threshold:
-                pass
+        new_values = cp.random.uniform(-1, 1, size=n_mutations)
 
-
-            break
+        weight[row_indices, col_indices] = new_values
 
     activation_mutate_prob = 1 - activation_mutate_prob
     potential_activation_mutation = random.uniform(0, 1)
{pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.3.13.dev0
+Version: 4.5
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=xOgL47nXk4gItE1UKQ_uEBevdRI2RUjN5RVuB-BRlHM,1277
 pyerualjetwork/activation_functions.py,sha256=Ms0AGBqkJuCA42ht64MSQnO54Td_1eDGquedpoBDVbc,7642
 pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
-pyerualjetwork/data_operations.py,sha256=
-pyerualjetwork/data_operations_cuda.py,sha256=
-pyerualjetwork/fitness_functions.py,sha256=
+pyerualjetwork/data_operations.py,sha256=c2FWIdHJQw_h6QP_ir0yx22zP7a9CtRp249V8Ro9V-0,15357
+pyerualjetwork/data_operations_cuda.py,sha256=IqS0JoXGM0XiYfoFSAj9li1WWiBroNXIcN52JWhlNFk,18224
+pyerualjetwork/fitness_functions.py,sha256=GisM8mDJAivw8YailXXDAw2M-lW1MHwRnIlWUVe-UEg,1261
 pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
 pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
 pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
@@ -12,14 +12,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
 pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256
+pyerualjetwork/plan.py,sha256=XEptIEWfWbqXEy92Eil5q_uujEiIBWnjejAGD7Lh0-w,22975
+pyerualjetwork/plan_cuda.py,sha256=rFHZa8jsgfol0uUo1rfCKAGtb-B4ivfzqqlnynsfMzQ,23966
+pyerualjetwork/planeat.py,sha256=DVJGptIPYKyz4ePwqRnbbwgHwQzbxMoBci0Te8kfCzk,38802
+pyerualjetwork/planeat_cuda.py,sha256=-GxZY8aMPayuYhwhcsRVn4LWq604o36VxkEtdoBum98,38835
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=1QSisYAaGnO5kug6qo1qOssTkQM-MgR7L8h4c3sczzU,28294
 pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
-pyerualjetwork-4.
-pyerualjetwork-4.
-pyerualjetwork-4.
-pyerualjetwork-4.
+pyerualjetwork-4.5.dist-info/METADATA,sha256=njuvH-FdU7-KudHvxUEJvBeyOvDWzyWi52ZIpRjR9K4,7503
+pyerualjetwork-4.5.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.5.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.5.dist-info/RECORD,,
{pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/WHEEL
File without changes
{pyerualjetwork-4.3.13.dev0.dist-info → pyerualjetwork-4.5.dist-info}/top_level.txt
File without changes