pyerualjetwork 5.28a0__py3-none-any.whl → 5.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +10 -10
- pyerualjetwork/cpu/__init__.py +27 -0
- pyerualjetwork/{activation_functions_cpu.py → cpu/activation_functions.py} +39 -2
- pyerualjetwork/{data_operations_cpu.py → cpu/data_ops.py} +8 -8
- pyerualjetwork/{ene_cpu.py → cpu/ene.py} +4 -4
- pyerualjetwork/{metrics_cpu.py → cpu/metrics.py} +1 -1
- pyerualjetwork/{model_operations_cpu.py → cpu/model_ops.py} +9 -9
- pyerualjetwork/{neu_cpu.py → cpu/nn.py} +25 -14
- pyerualjetwork/{visualizations_cpu.py → cpu/visualizations.py} +11 -11
- pyerualjetwork/cuda/__init__.py +27 -0
- pyerualjetwork/{activation_functions_cuda.py → cuda/activation_functions.py} +39 -0
- pyerualjetwork/{data_operations_cuda.py → cuda/data_ops.py} +9 -9
- pyerualjetwork/{ene_cuda.py → cuda/ene.py} +4 -4
- pyerualjetwork/{metrics_cuda.py → cuda/metrics.py} +1 -1
- pyerualjetwork/{model_operations_cuda.py → cuda/model_ops.py} +9 -9
- pyerualjetwork/{neu_cuda.py → cuda/nn.py} +27 -16
- pyerualjetwork/{visualizations_cuda.py → cuda/visualizations.py} +11 -11
- pyerualjetwork/help.py +5 -5
- pyerualjetwork/issue_solver.py +2 -2
- pyerualjetwork/{memory_operations.py → memory_ops.py} +1 -1
- {pyerualjetwork-5.28a0.dist-info → pyerualjetwork-5.31.dist-info}/METADATA +11 -14
- pyerualjetwork-5.31.dist-info/RECORD +28 -0
- pyerualjetwork-5.28a0.dist-info/RECORD +0 -26
- /pyerualjetwork/{loss_functions_cpu.py → cpu/loss_functions.py} +0 -0
- /pyerualjetwork/{loss_functions_cuda.py → cuda/loss_functions.py} +0 -0
- {pyerualjetwork-5.28a0.dist-info → pyerualjetwork-5.31.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.28a0.dist-info → pyerualjetwork-5.31.dist-info}/top_level.txt +0 -0
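The rename map above amounts to a new package layout: the flat `*_cpu` / `*_cuda` modules move into `cpu/` and `cuda/` subpackages, and several names are shortened (`data_operations` → `data_ops`, `model_operations` → `model_ops`, `neu` → `nn`, `memory_operations` → `memory_ops`). A hedged before/after import sketch; the old names come from the left side of the renames, the new ones from the right, and the aliasing style is only illustration:

```python
# pyerualjetwork <= 5.28a0 (old flat layout, per the left side of the renames above)
# from pyerualjetwork import neu_cpu, ene_cpu, data_operations_cpu, model_operations_cpu

# pyerualjetwork 5.31 (new subpackage layout, per the right side of the renames above)
from pyerualjetwork.cpu import nn, ene, data_ops, model_ops
from pyerualjetwork.cuda import nn as nn_cuda, ene as ene_cuda
from pyerualjetwork import memory_ops
```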
pyerualjetwork/__init__.py
CHANGED
@@ -11,17 +11,17 @@ training, and both detailed and simplified memory management.
 
 Library (CPU) Main Modules:
 ---------------------------
--
--
--
--
+- cpu.nn
+- cpu.ene
+- cpu.data_ops
+- cpu.model_ops
 
 Library (GPU) Main Modules:
 ---------------------------
--
--
--
--
+- cuda.nn
+- cuda.ene
+- cuda.data_ops
+- cuda.model_ops
 
 Memory Module:
 --------------
@@ -35,14 +35,14 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
 - Contact: tchasancan@gmail.com
 """
 
-__version__ = "5.
+__version__ = "5.31"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/cpu/__init__.py
ADDED
@@ -0,0 +1,27 @@
+"""
+CPU
+===
+The modules contained in this folder and their functions compute data on the central processing unit and store it in the CPU's RAM..
+
+Modules in the folder:
+----------------------
+- activation_functions
+- data_operations
+- ene
+- loss_functions
+- metrics
+- model_operations
+- nn
+- visualizations
+
+Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+- Creator: Hasan Can Beydili
+- YouTube: https://www.youtube.com/@HasanCanBeydili
+- Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+- Instagram: https://www.instagram.com/canbeydilj
+- Contact: tchasancan@gmail.com
+
+"""
pyerualjetwork/{activation_functions_cpu.py → cpu/activation_functions.py}
CHANGED
@@ -1,8 +1,46 @@
+"""
+
+Activation Functions on CPU
+===========================
+This module contains activation functions that run on the CPU.
+
+
+Module functions:
+-----------------
+- 'sigmoid': Sigmoid,
+- 'mod_circular': modular_circular_activation,
+- 'tanh_circular': tanh_circular_activation,
+- 'leaky_relu': leaky_relu,
+- 'relu': Relu,
+- 'gelu': gelu,
+- 'tanh': tanh,
+- 'sinakt': sinakt,
+- 'p_squared': p_squared,
+- 'sglu': lambda x: sglu(x, alpha=1.0),
+- 'dlrelu': dlrelu,
+- 'sin_plus': sin_plus,
+- 'acos': lambda x: acos(x, alpha=1.0, beta=0.0),
+- 'isra': isra,
+- 'waveakt': waveakt,
+- 'arctan': arctan,
+- 'bent_identity': bent_identity,
+- 'softsign': softsign,
+- 'pwl': pwl,
+- 'sine': sine,
+- 'tanh_square': tanh_square,
+- 'linear':,
+- 'sine_square': sine_square,
+- 'logarithmic': logarithmic,
+- 'sine_offset': lambda x: sine_offset(x, 1.0),
+- 'spiral': spiral_activation,
+- 'circular': circular_activation
+- Softmax()
+"""
+
 import numpy as np
 from scipy.special import expit, softmax
 import warnings
 
-
 # ACTIVATION FUNCTIONS -----
 
 def all_activations():
@@ -71,7 +109,6 @@ def Relu(
 
 return np.maximum(0, x)
 
-
 def tanh(x):
 return np.tanh(x)
 
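The new docstring above enumerates the activation names the CPU module maps to callables. A minimal sketch of using the functions that are directly visible in these hunks (`tanh`, `Relu`, `all_activations`); what `all_activations()` returns is an assumption based on the docstring list, not confirmed by the diff:

```python
import numpy as np
from pyerualjetwork.cpu import activation_functions as af

x = np.linspace(-3.0, 3.0, 7)

print(af.tanh(x))   # implemented as np.tanh(x), per the hunk above
print(af.Relu(x))   # implemented as np.maximum(0, x), per the hunk above

names = af.all_activations()  # assumed: returns the list of supported activation names
print(len(names))
```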
pyerualjetwork/{data_operations_cpu.py → cpu/data_ops.py}
CHANGED
@@ -21,7 +21,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -46,7 +46,7 @@ def encode_one_hot(y_train, y_test=None, summary=False):
 Returns:
 tuple: One-hot encoded y_train and (if given) y_test.
 """
-from
+from ..memory_ops import optimize_labels
 
 classes = np.unique(y_train)
 class_count = len(classes)
@@ -154,8 +154,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=np.float32
 x_balanced -- Balanced input dataset (numpy array format)
 y_balanced -- Balanced class labels (one-hot encoded, numpy array format)
 """
-from
-from
+from ..ui import loading_bars, get_loading_bar_style
+from ..memory_ops import transfer_to_cpu
 
 x_train = transfer_to_cpu(x_train, dtype=dtype)
 
@@ -229,8 +229,8 @@ def auto_balancer(x_train, y_train, dtype=np.float32):
 Returns:
 tuple: A tuple containing balanced input data and labels.
 """
-from
-from
+from ..ui import loading_bars, get_loading_bar_style
+from ..memory_ops import transfer_to_cpu
 
 x_train = transfer_to_cpu(x_train, dtype=dtype)
 
@@ -295,8 +295,8 @@ def synthetic_augmentation(x, y, dtype=np.float32):
 Returns:
 x_train_balanced, y_train_balanced (numpy array format)
 """
-from
-from
+from ..ui import loading_bars, get_loading_bar_style
+from ..memory_ops import transfer_to_cpu
 
 x = transfer_to_cpu(x, dtype=dtype)
 
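The hunk headers above expose the signatures of the relocated data helpers (`encode_one_hot(y_train, y_test=None, summary=False)`, `auto_balancer(x_train, y_train, dtype=np.float32)`). A minimal usage sketch under the 5.31 layout; the return shapes are assumptions based on the docstrings quoted above:

```python
import numpy as np
from pyerualjetwork.cpu import data_ops

x = np.random.rand(6, 4).astype(np.float32)
y = np.array([0, 1, 2, 1, 0, 2])

# assumed: returns only the encoded y_train when y_test is not given
y_one_hot = data_ops.encode_one_hot(y)

# docstring above: returns a tuple of balanced input data and labels
x_bal, y_bal = data_ops.auto_balancer(x, y_one_hot, dtype=np.float32)
print(x_bal.shape, y_bal.shape)
```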
pyerualjetwork/{ene_cpu.py → cpu/ene.py}
CHANGED
@@ -21,7 +21,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -34,9 +34,9 @@ import math
 import copy
 
 ### LIBRARY IMPORTS ###
-from .
-from
-from .
+from .data_ops import non_neg_normalization
+from ..ui import loading_bars, initialize_loading_bar
+from .activation_functions import apply_activation, all_activations
 
 def define_genomes(input_shape, output_shape, population_size, neurons=[], activation_functions=[], dtype=np.float32):
 """
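`define_genomes`'s full parameter list is visible in the context line above. A small, hedged sketch of creating an initial ENE population with it; only the parameter names are confirmed by the diff, and what the function returns is an assumption:

```python
import numpy as np
from pyerualjetwork.cpu import ene

# assumed: returns the initial genome population (e.g. weights and activation
# choices per genome); the diff only confirms the parameter names used below
population = ene.define_genomes(
    input_shape=4,        # number of input features
    output_shape=3,       # number of classes
    population_size=20,   # genomes in the population
    dtype=np.float32,
)
```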
pyerualjetwork/{metrics_cpu.py → cpu/metrics.py}
CHANGED
@@ -13,7 +13,7 @@ def metrics(y_ts, test_preds, average='weighted'):
 tuple: Precision, recall, F1 score.
 """
 
-from .
+from .data_ops import decode_one_hot
 
 y_test_d = decode_one_hot(y_ts)
 y_test_d = np.array(y_test_d)
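The hunk header shows `metrics(y_ts, test_preds, average='weighted')` returning precision, recall and F1. A short sketch, assuming `y_ts` is one-hot encoded (the body above decodes it with `decode_one_hot`); the expected format of `test_preds` (class indices here) is an assumption:

```python
import numpy as np
from pyerualjetwork.cpu.metrics import metrics

y_true_one_hot = np.eye(3)[[0, 1, 2, 1]]   # one-hot ground truth
y_pred_classes = np.array([0, 1, 1, 1])    # assumed: predicted class indices

precision, recall, f1 = metrics(y_true_one_hot, y_pred_classes, average='weighted')
print(precision, recall, f1)
```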
pyerualjetwork/{model_operations_cpu.py → cpu/model_ops.py}
CHANGED
@@ -38,7 +38,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -62,7 +62,7 @@ def save_model(model_name,
 test_acc=None,
 model_path='',
 activations=['linear'],
-activation_potentiation=
+activation_potentiation=None,
 weights_type='npy',
 weights_format='raw',
 show_architecture=False,
@@ -100,8 +100,8 @@ def save_model(model_name,
 No return.
 """
 
-from .
-from
+from .visualizations import draw_model_architecture
+from .. import __version__
 
 if model_type != 'PLAN' and model_type != 'MLP' and model_type != 'PTNN':
 raise ValueError("model_type parameter must be 'PLAN', 'MLP' or 'PTNN'.")
@@ -303,7 +303,7 @@ def load_model(model_name,
 lists: Weights, None, test_accuracy, activations, scaler_params, None, model_type, weight_type, weight_format, device_version, (list[df_elements])=Pandas DataFrame of the model
 """
 
-from
+from .. import __version__
 
 try:
 
@@ -395,8 +395,8 @@ def predict_from_storage(Input, model_name, model_path=''):
 ndarray: Output from the model.
 """
 
-from .
-from .
+from .activation_functions import apply_activation
+from .data_ops import standard_scaler
 
 try:
 
@@ -505,8 +505,8 @@ def predict_from_memory(Input, W, scaler_params=None, activations=['linear'], ac
 ndarray: Output from the model.
 """
 
-from .
-from .
+from .data_ops import standard_scaler
+from .activation_functions import apply_activation
 
 if model_type != 'PLAN' and model_type != 'MLP' and model_type != 'PTNN': raise ValueError("model_type parameter must be 'PLAN', 'MLP' or 'PTNN'.")
 
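`predict_from_storage(Input, model_name, model_path='')` is visible in the hunk header above and documents an ndarray return. A minimal sketch of predicting from a previously saved model; the model name and path are placeholders, and taking the argmax of the output is an assumption about how the returned array is meant to be read:

```python
import numpy as np
from pyerualjetwork.cpu import model_ops

sample = np.random.rand(1, 4).astype(np.float32)  # one input row (placeholder)

# 'my_model' and 'models/' are hypothetical; use whatever was passed to save_model
output = model_ops.predict_from_storage(sample, model_name='my_model', model_path='models/')
predicted_class = int(np.argmax(output))
print(predicted_class)
```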
pyerualjetwork/{neu_cpu.py → cpu/nn.py}
CHANGED
@@ -2,7 +2,7 @@
 """
 
 
-
+NN (Neural Networks) on CPU
 ============================
 This module hosts functions for training and evaluating artificial neural networks on CPU for labeled classification tasks (for now).
 
@@ -40,7 +40,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -52,14 +52,14 @@ import copy
 import random
 
 ### LIBRARY IMPORTS ###
-from
-from .
-from .
-from .
-from
-from .
-from
-from .
+from ..ui import loading_bars, initialize_loading_bar
+from .data_ops import normalization, batcher
+from .activation_functions import apply_activation, all_activations
+from .model_ops import get_acc, get_preds_softmax
+from ..memory_ops import optimize_labels
+from .loss_functions import categorical_crossentropy, binary_crossentropy
+from ..fitness_functions import wals
+from .visualizations import (
 draw_neural_web,
 display_visualizations_for_learner,
 update_history_plots_for_learner,
@@ -132,18 +132,29 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 * This function also able to train classic MLP model architectures.
 * And my newest innovative architecture: PTNN (Potentiation Transfer Neural Network).
 
+Examples:
+
+This creates a PLAN model:
+- ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=True) ```
+
+This creates a MLP model(with 2 hidden layer):
+- ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=False, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
+This creates a PTNN model(with 2 hidden layer & 1 aggregation layer(comes with PLAN)):
+- ```learn(x_train, y_train, optimizer, pop_size=100, gen=[10, 100], fit_start=True, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
 :Args:
 :param x_train: (array-like): Training input data.
 :param y_train: (array-like): Labels for training data. one-hot encoded.
-:param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.
+:param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.cpu.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
 ```python
-optimizer = lambda *args, **kwargs:
+optimizer = lambda *args, **kwargs: ene.evolver(*args,
 activation_add_prob=0.05,
 strategy='aggressive',
 policy='more_selective',
 **kwargs)
 
-model =
+model = nn.learn(x_train,
 y_train,
 optimizer,
 fit_start=True,
@@ -179,7 +190,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
 """
 
-from .
+from .ene import define_genomes
 
 data = 'Train'
 
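The updated `learn` docstring above spells out the ENE-driven training pattern. A slightly fuller sketch assembled from those snippets; the toy data, the specific hyperparameter values, and how the returned list would be unpacked are assumptions, not taken from the diff:

```python
import numpy as np
from pyerualjetwork.cpu import nn, ene

# toy data (placeholder); labels must be one-hot encoded per the docstring
x_train = np.random.rand(120, 4).astype(np.float32)
y_train = np.eye(3)[np.random.randint(0, 3, size=120)].astype(np.float32)

# optimizer pattern from the docstring: wrap ene.evolver with your hyperparameters
optimizer = lambda *args, **kwargs: ene.evolver(*args,
                                                activation_add_prob=0.05,
                                                strategy='aggressive',
                                                policy='more_selective',
                                                **kwargs)

# PLAN model, as in the first docstring example
model = nn.learn(x_train, y_train, optimizer, gen=100, pop_size=100, fit_start=True)
# docstring: returns [Weight matrix, Train Preds, Train Accuracy, [Activation functions]]
```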
pyerualjetwork/{visualizations_cpu.py → cpu/visualizations.py}
CHANGED
@@ -86,7 +86,7 @@ def draw_model_architecture(model_name, model_path=''):
 Visualizes the architecture of a neural network model with multiple inputs based on activation functions.
 """
 
-from .
+from .model_ops import load_model, get_scaler, get_act, get_weights
 
 model = load_model(model_name=model_name, model_path=model_path)
 
@@ -182,7 +182,7 @@ def draw_model_architecture(model_name, model_path=''):
 
 def draw_activations(x_train, activation):
 
-from . import
+from . import activation_functions as af
 
 if activation == 'sigmoid':
 result = af.Sigmoid(x_train)
@@ -327,10 +327,10 @@ def draw_activations(x_train, activation):
 
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
 
-from .
-from
-from .
-from .
+from .metrics import metrics, confusion_matrix, roc_curve
+from ..ui import loading_bars, initialize_loading_bar
+from .data_ops import decode_one_hot
+from .model_ops import predict_model_ram
 
 bar_format_normal = loading_bars()[0]
 
@@ -455,8 +455,8 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
 
 def plot_decision_boundary(x, y, activations, W, artist=None, ax=None):
 
-from .
-from .
+from .model_ops import predict_model_ram
+from .data_ops import decode_one_hot
 
 feature_indices = [0, 1]
 
@@ -513,8 +513,8 @@ def plot_decision_boundary(x, y, activations, W, artist=None, ax=None):
 
 def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
 
-from .
-from .
+from .metrics import pca
+from .data_ops import decode_one_hot
 
 if x.shape[1] > 2:
 
@@ -707,7 +707,7 @@ def show():
 
 def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
 
-from .
+from .data_ops import find_closest_factors
 viz_objects = {}
 
 if show_history:
pyerualjetwork/cuda/__init__.py
ADDED
@@ -0,0 +1,27 @@
+"""
+CUDA
+====
+The modules contained in this folder and their functions compute data on a graphics processing unit with CUDA technology and a installed CUDA toolkit, storing it in the GPU's VRAM.
+
+Modules in the folder:
+----------------------
+- activation_functions
+- data_operations
+- ene
+- loss_functions
+- metrics
+- model_operations
+- nn
+- visualizations
+
+Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+- Creator: Hasan Can Beydili
+- YouTube: https://www.youtube.com/@HasanCanBeydili
+- Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+- Instagram: https://www.instagram.com/canbeydilj
+- Contact: tchasancan@gmail.com
+
+"""
pyerualjetwork/{activation_functions_cuda.py → cuda/activation_functions.py}
CHANGED
@@ -1,3 +1,42 @@
+"""
+
+Activation Functions on CUDA
+============================
+This module contains activation functions that run on the CUDA GPU.
+
+
+Module functions:
+-----------------
+- 'sigmoid': Sigmoid,
+- 'mod_circular': modular_circular_activation,
+- 'tanh_circular': tanh_circular_activation,
+- 'leaky_relu': leaky_relu,
+- 'relu': Relu,
+- 'gelu': gelu,
+- 'tanh': tanh,
+- 'sinakt': sinakt,
+- 'p_squared': p_squared,
+- 'sglu': lambda x: sglu(x, alpha=1.0),
+- 'dlrelu': dlrelu,
+- 'sin_plus': sin_plus,
+- 'acos': lambda x: acos(x, alpha=1.0, beta=0.0),
+- 'isra': isra,
+- 'waveakt': waveakt,
+- 'arctan': arctan,
+- 'bent_identity': bent_identity,
+- 'softsign': softsign,
+- 'pwl': pwl,
+- 'sine': sine,
+- 'tanh_square': tanh_square,
+- 'linear':,
+- 'sine_square': sine_square,
+- 'logarithmic': logarithmic,
+- 'sine_offset': lambda x: sine_offset(x, 1.0),
+- 'spiral': spiral_activation,
+- 'circular': circular_activation
+- Softmax()
+"""
+
 import cupy as cp
 import numpy as np
 from scipy.special import expit, softmax
pyerualjetwork/{data_operations_cuda.py → cuda/data_ops.py}
CHANGED
@@ -21,7 +21,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -47,7 +47,7 @@ def encode_one_hot(y_train, y_test=None, summary=False):
 tuple: One-hot encoded y_train and (if given: y_test).
 """
 
-from
+from ..memory_ops import optimize_labels, transfer_to_cpu
 
 y_train = transfer_to_cpu(y_train,dtype=y_train.dtype)
 y_test = transfer_to_cpu(y_test,dtype=y_test.dtype)
@@ -113,7 +113,7 @@ def split(X, y, test_size, random_state=42, dtype=cp.float32, shuffle_in_cpu=Fal
 Returns:
 tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
 """
-from
+from ..memory_ops import transfer_to_gpu, optimize_labels
 
 X = transfer_to_gpu(X, dtype=dtype)
 y = optimize_labels(y, one_hot_encoded=False, cuda=True)
@@ -172,8 +172,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
 x_balanced -- Balanced input dataset (cupy array format)
 y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
 """
-from
-from
+from ..ui import loading_bars, get_loading_bar_style
+from ..memory_ops import transfer_to_gpu
 
 bar_format = loading_bars()[0]
 x_train = transfer_to_gpu(x_train, dtype=dtype)
@@ -261,8 +261,8 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
 tuple: A tuple containing balanced input data and labels.
 """
 
-from
-from
+from ..ui import loading_bars, get_loading_bar_style
+from ..memory_ops import transfer_to_gpu
 
 x_train = transfer_to_gpu(x_train, dtype=dtype)
 y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -331,8 +331,8 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=Fa
 Returns:
 x_train_balanced, y_train_balanced (cupy array format)
 """
-from
-from
+from ..ui import loading_bars, get_loading_bar_style
+from ..memory_ops import transfer_to_gpu
 
 x = transfer_to_gpu(x_train, dtype=dtype)
 y = transfer_to_gpu(y_train, dtype=y_train.dtype)
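`split`'s full signature is visible in the hunk header above. A short sketch of a GPU-resident train/test split; the toy arrays are placeholders, and the assumption is that CuPy arrays come back in the documented (x_train, x_test, y_train, y_test) order:

```python
import cupy as cp
import numpy as np
from pyerualjetwork.cuda import data_ops

X = np.random.rand(100, 8).astype(np.float32)  # placeholder features
y = np.random.randint(0, 2, size=100)          # placeholder labels

x_train, x_test, y_train, y_test = data_ops.split(
    X, y, test_size=0.2, random_state=42, dtype=cp.float32, shuffle_in_cpu=False)
print(x_train.shape, x_test.shape)
```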
pyerualjetwork/{ene_cuda.py → cuda/ene.py}
CHANGED
@@ -21,7 +21,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -35,9 +35,9 @@ import math
 import copy
 
 ### LIBRARY IMPORTS ###
-from .
-from
-from .
+from .data_ops import non_neg_normalization
+from ..ui import loading_bars, initialize_loading_bar
+from .activation_functions import apply_activation, all_activations
 
 def define_genomes(input_shape, output_shape, population_size, neurons=[], activation_functions=[], dtype=cp.float32):
 """
pyerualjetwork/{model_operations_cuda.py → cuda/model_ops.py}
CHANGED
@@ -39,7 +39,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -64,7 +64,7 @@ def save_model(model_name,
 test_acc=None,
 model_path='',
 activations=['linear'],
-activation_potentiation=
+activation_potentiation=None,
 weights_type='npy',
 weights_format='raw',
 show_architecture=False,
@@ -105,8 +105,8 @@ def save_model(model_name,
 No return.
 """
 
-from .
-from
+from .visualizations import draw_model_architecture
+from .. import __version__
 
 if model_type != 'PLAN' and model_type != 'MLP' and model_type != 'PTNN':
 raise ValueError("model_type parameter must be 'PLAN', 'MLP' or 'PTNN'.")
@@ -316,7 +316,7 @@ def load_model(model_name,
 lists: Weights, None, test_accuracy, activations, scaler_params, None, model_type, weight_type, weight_format, device_version, (list[df_elements])=Pandas DataFrame of the model
 """
 
-from
+from .. import __version__
 
 try:
 
@@ -419,8 +419,8 @@ def predict_from_storage(Input, model_name, model_path='', dtype=cp.float32):
 
 Input = cp.array(Input, dtype=dtype, copy=False)
 
-from .
-from .
+from .activation_functions import apply_activation
+from .data_ops import standard_scaler
 
 try:
 
@@ -533,8 +533,8 @@ def predict_from_memory(Input, W, scaler_params=None, activations=['linear'], ac
 cupyarray: Output from the model.
 """
 
-from .
-from .
+from .data_ops import standard_scaler
+from .activation_functions import apply_activation
 
 if isinstance(activations, str):
 activations = [activations]
pyerualjetwork/{neu_cuda.py → cuda/nn.py}
CHANGED
@@ -2,7 +2,7 @@
 """
 
 
-
+NN (Neural Networks) on CUDA
 =============================
 This module hosts functions for training and evaluating artificial neural networks on CUDA GPU for labeled classification tasks (for now).
 
@@ -41,7 +41,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
@@ -54,14 +54,14 @@ import copy
 import random
 
 ### LIBRARY IMPORTS ###
-from
-from .
-from .
-from .
-from
-from .
-from
-from .
+from ..ui import loading_bars, initialize_loading_bar
+from .data_ops import normalization
+from .activation_functions import apply_activation, all_activations
+from .model_ops import get_acc, get_preds_softmax
+from ..memory_ops import transfer_to_gpu, transfer_to_cpu, optimize_labels
+from .loss_functions import categorical_crossentropy, binary_crossentropy
+from ..fitness_functions import wals
+from .visualizations import (
 draw_neural_web,
 display_visualizations_for_learner,
 update_history_plots_for_learner,
@@ -127,19 +127,30 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 * This function also able to train classic MLP model architectures.
 * And my newest innovative architecture: PTNN (Potentiation Transfer Neural Network).
 
+Examples:
+
+This creates a PLAN model:
+- ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=True) ```
+
+This creates a MLP model(with 2 hidden layer):
+- ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=False, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
+This creates a PTNN model(with 2 hidden layer & 1 aggregation layer(comes with PLAN)):
+- ```learn(x_train, y_train, optimizer, pop_size=100, gen=[10, 100], fit_start=True, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
 :Args:
 :param x_train: (array-like): Training input data.
 :param y_train: (array-like): Labels for training data.
-:param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.
+:param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.cuda.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
 ```python
 
-optimizer = lambda *args, **kwargs:
+optimizer = lambda *args, **kwargs: ene.evolver(*args,
 activation_add_prob=0.05,
 strategy='aggressive',
 policy='more_selective',
 **kwargs)
 
-model =
+model = nn.learn(x_train,
 y_train,
 optimizer,
 fit_start=True,
@@ -176,7 +187,7 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
 """
 
-from .
+from .ene import define_genomes
 
 data = 'Train'
 
@@ -197,13 +208,13 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
 x_train = transfer_to_gpu(x_train, dtype=x_train.dtype)
 y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
 
-from .
+from .data_ops import batcher
 
 elif memory == 'cpu':
 x_train = transfer_to_cpu(x_train, dtype=x_train.dtype)
 y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
 
-from .
+from pyerualjetwork.cpu.data_ops import batcher
 
 else:
 raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
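The last hunk above shows the CUDA trainer branching on a `memory` value restricted to 'cpu' or 'gpu'. A hedged sketch of GPU training mirroring the CPU example; it assumes `memory` is a keyword parameter of `learn` (the signature is truncated in this diff) and that the docstring's gen/pop_size/fit_start arguments apply unchanged:

```python
import numpy as np
from pyerualjetwork.cuda import nn, ene

x_train = np.random.rand(120, 4).astype(np.float32)              # placeholder data
y_train = np.eye(3)[np.random.randint(0, 3, size=120)].astype(np.float32)

optimizer = lambda *args, **kwargs: ene.evolver(*args,
                                                activation_add_prob=0.05,
                                                **kwargs)

# 'gpu' keeps data and batching in VRAM per the branch above; 'cpu' batches in RAM
model = nn.learn(x_train, y_train, optimizer, gen=100, pop_size=100,
                 fit_start=True, memory='gpu')
```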
pyerualjetwork/{visualizations_cuda.py → cuda/visualizations.py}
CHANGED
@@ -87,7 +87,7 @@ def draw_model_architecture(model_name, model_path=''):
 Visualizes the architecture of a neural network model with multiple inputs based on activation functions.
 """
 
-from .
+from .model_ops import load_model, get_scaler, get_act, get_weights
 
 model = load_model(model_name=model_name, model_path=model_path)
 
@@ -183,7 +183,7 @@ def draw_model_architecture(model_name, model_path=''):
 
 def draw_activations(x_train, activation):
 
-from . import
+from . import activation_functions as af
 
 if activation == 'sigmoid':
 result = af.Sigmoid(x_train)
@@ -328,10 +328,10 @@ def draw_activations(x_train, activation):
 
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
 
-from .
-from
-from .
-from .
+from .metrics import metrics, confusion_matrix, roc_curve
+from ..ui import loading_bars, initialize_loading_bar
+from .data_ops import decode_one_hot
+from .model_ops import predict_model_ram
 
 bar_format_normal = loading_bars()[0]
 
@@ -451,8 +451,8 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
 
 def plot_decision_boundary(x, y, activations, W, artist=None, ax=None):
 
-from .
-from .
+from .model_ops import predict_model_ram
+from .data_ops import decode_one_hot
 
 feature_indices = [0, 1]
 
@@ -509,8 +509,8 @@ def plot_decision_boundary(x, y, activations, W, artist=None, ax=None):
 
 def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
 
-from .
-from .
+from .metrics import pca
+from .data_ops import decode_one_hot
 
 if x.shape[1] > 2:
 
@@ -699,7 +699,7 @@ def update_neuron_history_for_learner(LTPW, ax1, row, col, class_count, artist5,
 
 def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
 
-from .
+from .data_ops import find_closest_factors
 viz_objects = {}
 
 if show_history:
pyerualjetwork/help.py
CHANGED
@@ -1,4 +1,4 @@
-from pyerualjetwork.
+from pyerualjetwork.cuda.nn import all_activations
 
 
 def activation_potentiation():
@@ -11,7 +11,7 @@ def activation_potentiation():
 
 def docs_and_examples():
 
-print('PLAN document: https://github.com/HCB06/
-print('PLAN examples: https://github.com/HCB06/
-print('
-print('
+print('PLAN & ENE document: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PLAN\n')
+print('PLAN examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes\n')
+print('ENE examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes/ENE\n')
+print('PyerualJetwork document and examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork')
pyerualjetwork/issue_solver.py
CHANGED
@@ -38,7 +38,7 @@ def update_model_to_v5(model_name, model_path, is_cuda):
 
 if is_cuda:
 
-from .
+from pyerualjetwork.cuda.model_ops import (get_act,
 get_weights,
 get_scaler,
 get_acc,
@@ -49,7 +49,7 @@ def update_model_to_v5(model_name, model_path, is_cuda):
 save_model)
 else:
 
-from .
+from pyerualjetwork.cpu.model_ops import (get_act,
 get_weights,
 get_scaler,
 get_acc,
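`update_model_to_v5(model_name, model_path, is_cuda)` is the only signature visible in this file's hunks. A minimal, hedged call for migrating an older saved model to the v5 format; the model name and path are placeholders, and the exact on-disk effect is assumed from the function name:

```python
from pyerualjetwork import issue_solver

# 'old_model' / 'models/' are placeholders; is_cuda=False selects the CPU model_ops path
issue_solver.update_model_to_v5(model_name='old_model', model_path='models/', is_cuda=False)
```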
pyerualjetwork/{memory_operations.py → memory_ops.py}
CHANGED
@@ -17,7 +17,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe
 
 PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
--
+- Creator: Hasan Can Beydili
 - YouTube: https://www.youtube.com/@HasanCanBeydili
 - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
 - Instagram: https://www.instagram.com/canbeydilj
{pyerualjetwork-5.28a0.dist-info → pyerualjetwork-5.31.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.
+Version: 5.31
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -24,17 +24,14 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
 
 YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7HbsBpCx2VTJ4SK9wcPyse-EHw
 
-
+installation:
+'pip install pyerualjetwork'
 
-
-from pyerualjetwork import
-from pyerualjetwork import
-
-
-from pyerualjetwork import neu_cuda
-from pyerualjetwork import ene_cuda
-from pyerualjetwork import data_operations_cuda
-from pyerualjetwork import model_operations_cuda
+package modules:
+'from pyerualjetwork.cpu import nn, ene, data_ops, model_ops, memory_ops'
+'from pyerualjetwork.cuda import nn, ene, data_ops, model_ops, memory_ops'
+
+please read docstrings.
 
 PyerualJetwork has Issue Solver. This operation provides users ready-to-use functions to identify potential issues
 caused by version incompatibilities in major updates, ensuring users are not affected by such problems.
@@ -74,10 +71,10 @@ PyerualJetwork is free to use for commercial business and individual users.
 PyerualJetwork ready for both eager execution(like PyTorch) and static graph(like Tensorflow) concepts because PyerualJetwork using only functions.
 For example:
 
-
+plan_fit function only fits given training data(suitable for dynamic graph) but learn function learns and optimize entire architecture(suitable for static graph). Or more deeper eager executions PyerualJetwork have: cross_over function, mutation function, list of activation functions, loss functions. You can create your unique model architecture. Move your data to GPU or CPU or manage how much should in GPU, Its all up to you.
 <br><br>
 
-PyerualJetworket includes PLAN, MLP & ENE.<br>
+PyerualJetworket includes PLAN, MLP, PTNN & ENE.<br>
 
 PLAN VISION:<br>
 
@@ -122,6 +119,6 @@ HOW DO I IMPORT IT TO MY PROJECT?
 
 Anaconda users can access the 'Anaconda Prompt' terminal from the Start menu and add the necessary library modules to the Python module search queue by typing "pip install pyerualjetwork" and pressing enter. If you are not using Anaconda, you can simply open the 'cmd' Windows command terminal from the Start menu and type "pip install PyerualJetwork". (Visual Studio Code reccomended) After installation, it's important to periodically open the terminal of the environment you are using and stay up to date by using the command "pip install PyerualJetwork --upgrade".
 
-After installing the module using "pip" you can now call the library module in your project environment. Use: “from pyerualjetwork import
+After installing the module using "pip" you can now call the library module in your project environment. Use: “from pyerualjetwork.cpu import nn. Now, you can call the necessary functions from the nn module.
 
 The PLAN algorithm & ENE algorithm will not be explained in this document. This document focuses on how professionals can integrate and use PyerualJetwork in their systems. However, briefly, the PLAN algorithm can be described as a classification algorithm. PLAN algorithm achieves this task with an incredibly energy-efficient, fast, and hyperparameter-free user-friendly approach. For more detailed information, you can check out .pdf) file.
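The METADATA text above mentions moving data between CPU and GPU. The module diffs earlier on this page show `transfer_to_gpu(x, dtype=...)` and `transfer_to_cpu(x, dtype=...)` imported from `memory_ops`; a hedged sketch of that flow, where the return types (CuPy on the GPU side, NumPy on the CPU side) are assumptions:

```python
import numpy as np
from pyerualjetwork.memory_ops import transfer_to_gpu, transfer_to_cpu

x = np.random.rand(1000, 32).astype(np.float32)

x_gpu = transfer_to_gpu(x, dtype=np.float32)      # assumed: returns a CuPy array in VRAM
x_cpu = transfer_to_cpu(x_gpu, dtype=np.float32)  # assumed: returns a NumPy array in RAM
```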
pyerualjetwork-5.31.dist-info/RECORD
ADDED
@@ -0,0 +1,28 @@
+pyerualjetwork/__init__.py,sha256=virzpB1MRqS8c5NYr5Gy9N2IMzwq7PU2QgBPMxm0oLQ,2704
+pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
+pyerualjetwork/help.py,sha256=Nyi0gHAN9ZnO4wgQLeENt0n7tSCZ3hJmjaJ853eGjCE,831
+pyerualjetwork/issue_solver.py,sha256=3pZTGotS29sy3pIuGQoJFUePibtSzS-tNoU80T_Usgk,3131
+pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
+pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
+pyerualjetwork/cpu/__init__.py,sha256=0yAYner_-v7SmT3P7JV2itU8xJUQdQpb40dhAMQiZkc,829
+pyerualjetwork/cpu/activation_functions.py,sha256=zZSoOQ452Ykp_RsHVxklxesJmmFgufyIB4F3WQjudEQ,6689
+pyerualjetwork/cpu/data_ops.py,sha256=-XeMLRTQ5g7GMJdKYVMKJA7bSj6PbKEEpbQDRRhAIT4,16166
+pyerualjetwork/cpu/ene.py,sha256=ZLCaCxkpAmFLdxDS2OH-S8fT4jKq4HNVCHgpIufb8lg,44322
+pyerualjetwork/cpu/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/cpu/metrics.py,sha256=WhZ8iEqWehaygPRADUlhA5j_Qv3UwqV_eMxpyRVkeVs,6070
+pyerualjetwork/cpu/model_ops.py,sha256=9iZgl2yPYH6m7d9C-QdBYnkDEZiXgutxofck2papRxU,20478
+pyerualjetwork/cpu/nn.py,sha256=J_Y5us-vOIhcD_h4CgaY4aOza4xi9ISu2WAfii1AfFw,32020
+pyerualjetwork/cpu/visualizations.py,sha256=rOQsc-W8b71z7ovXSoF49lx4fmpvlaHLsyj9ejWnhnI,28164
+pyerualjetwork/cuda/__init__.py,sha256=NbqvAS4jlMdoFdXa5_hi5ukXQ5zAZR_5BQ4QAqtiKug,879
+pyerualjetwork/cuda/activation_functions.py,sha256=FmoSAxDr9SGO4nkE6ZflXK4pmvZ0sL3Epe1Lz-3GOVI,6766
+pyerualjetwork/cuda/data_ops.py,sha256=SiNodFNmWyTPY_KnKuAi9biPRdpTAYY3XM01bRSUPCs,18510
+pyerualjetwork/cuda/ene.py,sha256=aSCPr9VFdgK2cxxfwuP7z0jbJL9gkKNM0rgu8ihLarQ,44830
+pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
+pyerualjetwork/cuda/model_ops.py,sha256=lM6yT4ZMHs-0_M3Op8m8mQV_HRADm7ROHESgyTc7bCw,21204
+pyerualjetwork/cuda/nn.py,sha256=7rbaIEcmssaFgcionWVRmKijlgFyftVjf-MMNaLO_28,33140
+pyerualjetwork/cuda/visualizations.py,sha256=9l5BhXqXoeopdhLvVGvjH1TKYZb9JdKOsSE2IYD02zs,28569
+pyerualjetwork-5.31.dist-info/METADATA,sha256=8xJBTVON9V34hEKYXvZQdPS1HtEDvLNRiif0A1pqRos,8020
+pyerualjetwork-5.31.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.31.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.31.dist-info/RECORD,,
pyerualjetwork-5.28a0.dist-info/RECORD
REMOVED
@@ -1,26 +0,0 @@
-pyerualjetwork/__init__.py,sha256=wDb7sd2nFRlS8ty2Zrkq7FZd51YvCHbKHfMCibTrssA,2735
-pyerualjetwork/activation_functions_cpu.py,sha256=axsVRSjw0GuRB709aBwyaNDgAi2vJBIqmJjTmcsCBBY,5743
-pyerualjetwork/activation_functions_cuda.py,sha256=mNlecgmVX9G0_2yQ2_c6XQoMfvcdWIt9b1RUTdoLNBg,5809
-pyerualjetwork/data_operations_cpu.py,sha256=HemqiYfSdlQKTTYNzpCh_9lTtS3AimMI4DvqJBAGjGw,16186
-pyerualjetwork/data_operations_cuda.py,sha256=5zgyJGPjQuHyx6IHNkRwMguYhm-GcI6Hal49WNvw-bM,18536
-pyerualjetwork/ene_cpu.py,sha256=35xz-KSmCigCg4lU7TD20EZbfuAN5PS21NcSywMTKhs,44350
-pyerualjetwork/ene_cuda.py,sha256=9RyXC4JkRfDfhQUDkphFaKD89MiTp3QIia1brZTjsNA,44860
-pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
-pyerualjetwork/help.py,sha256=FcX8mxo1_mvoqONVXY0Kn7S09CDkhi0jwNmn8g9mYZc,804
-pyerualjetwork/issue_solver.py,sha256=iY6hSsBxYI5l82RwnXQp2DrRUJyksk_7U9GUSnt2YfU,3117
-pyerualjetwork/loss_functions_cpu.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
-pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/memory_operations.py,sha256=g24d-cDuUFc0fOEtk3AJe-z_EBctYV5S4cY1rQ6VGiE,14279
-pyerualjetwork/metrics_cpu.py,sha256=vbfMwS0ay2heMSa0GNo-ydLjQ8cfexbLwaREp4FKAtY,6081
-pyerualjetwork/metrics_cuda.py,sha256=PWyJyexeqlPKb09LAcF55JvhZVeXLCu3P_siYq5m2gg,5065
-pyerualjetwork/model_operations_cpu.py,sha256=Y0uPkLVbdodP7lC-fOPdja3RWi2J9z2rwWIS2pxzotU,20523
-pyerualjetwork/model_operations_cuda.py,sha256=B6vNYmqvrEJ3ZMGE1RWeJYn3V-JCsXhCHvS-aX4bWuU,21254
-pyerualjetwork/neu_cpu.py,sha256=h97WXTdj0Sizgo-imcyeStxW4dixUITwrla34bd8EWQ,31432
-pyerualjetwork/neu_cuda.py,sha256=NnQEnMRz5m3wPlotNKy6v4BfNkuzyaK6ZlAWcjBdZ8Y,32569
-pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
-pyerualjetwork/visualizations_cpu.py,sha256=StyD1Hl1Gt55EMqR6tO3yVJZdPyGkOgCnQ75Zn8K6J8,28252
-pyerualjetwork/visualizations_cuda.py,sha256=7lYrkOdrjwQGB3T4k_vI8UDxsm_TRjzaSSg9GhlNczs,28667
-pyerualjetwork-5.28a0.dist-info/METADATA,sha256=Afcltg5ySVvZ_5LLsNKeUYk8xXQTRpbFqih9kNrut5w,8135
-pyerualjetwork-5.28a0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-5.28a0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-5.28a0.dist-info/RECORD,,
/pyerualjetwork/{loss_functions_cpu.py → cpu/loss_functions.py}
File without changes
/pyerualjetwork/{loss_functions_cuda.py → cuda/loss_functions.py}
File without changes
{pyerualjetwork-5.28a0.dist-info → pyerualjetwork-5.31.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.28a0.dist-info → pyerualjetwork-5.31.dist-info}/top_level.txt
File without changes