pyerualjetwork 4.3.8.dev15__tar.gz → 4.3.9b1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/PKG-INFO +2 -19
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/README.md +1 -18
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/__init__.py +1 -1
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/plan.py +35 -19
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/plan_cuda.py +33 -18
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/planeat_cuda.py +9 -45
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/visualizations.py +27 -24
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/visualizations_cuda.py +19 -20
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork.egg-info/PKG-INFO +2 -19
- pyerualjetwork-4.3.9b1/pyerualjetwork.egg-info/SOURCES.txt +27 -0
- pyerualjetwork-4.3.9b1/pyerualjetwork.egg-info/top_level.txt +1 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/setup.py +1 -1
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/activation_functions.py +0 -290
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/activation_functions_cuda.py +0 -340
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/data_operations_cuda.py +0 -461
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/model_operations.py +0 -408
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/model_operations_cuda.py +0 -421
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/plan.py +0 -627
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/plan_cuda.py +0 -648
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/planeat.py +0 -825
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/visualizations.py +0 -823
- pyerualjetwork-4.3.8.dev15/pyerualjetwork/visualizations_cuda.py +0 -825
- pyerualjetwork-4.3.8.dev15/pyerualjetwork.egg-info/SOURCES.txt +0 -48
- pyerualjetwork-4.3.8.dev15/pyerualjetwork.egg-info/top_level.txt +0 -2
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/parallel.py +0 -118
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/planeat_cuda.py +0 -752
- pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner/ui.py +0 -22
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/activation_functions.py +0 -0
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/activation_functions_cuda.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/data_operations.py +0 -0
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/data_operations_cuda.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/help.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/loss_functions.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/loss_functions_cuda.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/memory_operations.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/metrics.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/metrics_cuda.py +0 -0
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/model_operations.py +0 -0
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/model_operations_cuda.py +0 -0
- {pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/planeat.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/ui.py +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/setup.cfg +0 -0
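Note on the file moves above: the separate `pyerualjetwork_afterburner` package that shipped in 4.3.8.dev15 has been folded into the main package in 4.3.9b1. The afterburner modules now live under `pyerualjetwork/`, and both the afterburner copies and the superseded main-package modules are deleted. For code that imported the afterburner namespace, migration is a plain import rename; a before/after sketch (illustrative, not official migration guidance):

```python
# 4.3.8.dev15: the high-throughput modules lived in a separate package.
# from pyerualjetwork_afterburner import plan, plan_cuda

# 4.3.9b1: the afterburner modules replace the main-package modules,
# so everything is imported from the single pyerualjetwork namespace.
from pyerualjetwork import plan, plan_cuda
```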
{pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.3.8.dev15
+Version: 4.3.9b1
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -25,8 +25,6 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
 YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7HbsBpCx2VTJ4SK9wcPyse-EHw

 pip install pyerualjetwork
-
-'use this if your data small or memory management is a problem :'

 from pyerualjetwork import plan
 from pyerualjetwork import planeat
@@ -38,26 +36,11 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
 from pyerualjetwork import data_operations_cuda
 from pyerualjetwork import model_operations_cuda

-'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
-afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
-Specially designed for LLM training and other massive model training)'
-
-from pyerualjetwork_afterburner import plan
-from pyerualjetwork_afterburner import planeat
-from pyerualjetwork_afterburner import data_operations
-from pyerualjetwork_afterburner import model_operations
-
-from pyerualjetwork_afterburner import plan_cuda
-from pyerualjetwork_afterburner import planeat_cuda
-from pyerualjetwork_afterburner import data_operations_cuda
-from pyerualjetwork_afterburner import model_operations_cuda
-
 Optimized for Visual Studio Code

 requires=[
 'scipy==1.13.1',
 'tqdm==4.66.4',
-'seaborn==0.13.2',
 'pandas==2.2.2',
 'networkx==3.3',
 'numpy==1.26.4',
@@ -67,7 +50,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
 'psutil==6.1.1'
 ]

-matplotlib,
+matplotlib, networkx (optional).

 ##############################

{pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/README.md

@@ -16,8 +16,6 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
 YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7HbsBpCx2VTJ4SK9wcPyse-EHw

 pip install pyerualjetwork
-
-'use this if your data small or memory management is a problem :'

 from pyerualjetwork import plan
 from pyerualjetwork import planeat
@@ -29,26 +27,11 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
 from pyerualjetwork import data_operations_cuda
 from pyerualjetwork import model_operations_cuda

-'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
-afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
-Specially designed for LLM training and other massive model training)'
-
-from pyerualjetwork_afterburner import plan
-from pyerualjetwork_afterburner import planeat
-from pyerualjetwork_afterburner import data_operations
-from pyerualjetwork_afterburner import model_operations
-
-from pyerualjetwork_afterburner import plan_cuda
-from pyerualjetwork_afterburner import planeat_cuda
-from pyerualjetwork_afterburner import data_operations_cuda
-from pyerualjetwork_afterburner import model_operations_cuda
-
 Optimized for Visual Studio Code

 requires=[
 'scipy==1.13.1',
 'tqdm==4.66.4',
-'seaborn==0.13.2',
 'pandas==2.2.2',
 'networkx==3.3',
 'numpy==1.26.4',
@@ -58,7 +41,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
 'psutil==6.1.1'
 ]

-matplotlib,
+matplotlib, networkx (optional).

 ##############################

{pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/__init__.py

@@ -1,4 +1,4 @@
-__version__ = "4.3.8.dev15"
+__version__ = "4.3.9b1"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

 def print_version(__version__):
{pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/plan.py

@@ -44,6 +44,7 @@ def fit(
     y_train,
     activation_potentiation=['linear'],
     W=None,
+    normalization=False,
     dtype=np.float32
 ):
     """
@@ -59,6 +60,8 @@ def fit(

     W (numpy.ndarray): If you want to re-continue or update model

+    normalization (bool, optional): Normalization may solves overflow problem. Default: False
+
     dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

     Returns:
@@ -67,19 +70,21 @@ def fit(

     # Pre-check

-    if len(x_train) != len(y_train):
-        raise ValueError("x_train and y_train must have the same length.")
+    if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")
+
+    weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
+    if normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+    elif normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+    else: raise ValueError('normalization parameter only be True or False')

-    x_train = apply_activation(x_train, activation_potentiation)
-    LTPW += np.array(y_train, dtype=optimize_labels(y_train, cuda=False).dtype).T @ x_train
+    weight += y_train.T @ x_train

-    return normalization(LTPW, dtype=dtype)
+    return normalization(weight, dtype=dtype)


 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
-            neural_web_history=False, show_current_activations=False,
+            neural_web_history=False, show_current_activations=False, normalization=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
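The rewritten `fit` above is a single-pass update: apply the activation functions to the inputs (optionally normalized first, to guard against overflow), accumulate the outer products `y_train.T @ x_train` into the weight matrix, and normalize the result. Note that, as the diff stands, the boolean `normalization` parameter shares its name with the imported `normalization` helper it is meant to call, so the `True` branch appears to shadow the function. A minimal NumPy sketch of the core logic; `normalize_weights` here is a stand-in for the library's `normalization` helper, whose exact scaling may differ:

```python
import numpy as np

def normalize_weights(w, dtype=np.float32):
    # Stand-in for pyerualjetwork.data_operations.normalization (assumed scaling).
    return (w / np.max(np.abs(w))).astype(dtype, copy=False)

rng = np.random.default_rng(0)
x_train = rng.random((100, 64), dtype=np.float32)               # 100 samples, 64 features
y_train = np.eye(4, dtype=np.float32)[rng.integers(0, 4, 100)]  # one-hot labels, 4 classes

# Core of the rewritten fit(): start from zeros (or a passed-in W),
# accumulate label/input outer products in one pass, then normalize.
weight = np.zeros((y_train.shape[1], x_train.shape[1]), dtype=np.float32)
weight += y_train.T @ x_train
weight = normalize_weights(weight)
print(weight.shape)  # (4, 64)
```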
@@ -131,6 +136,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

     show_history (bool, optional): If True, displays the training history after optimization. Default is False.

+    normalization (bool, optional): Normalization may solves overflow problem. Default: False
+
     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

     interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
@@ -183,9 +190,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

     # Initialize progress bar
     if batch_size == 1:
-        ncols =
+        ncols = 78
     else:
-        ncols =
+        ncols = 49

     # Initialize variables
     best_acc = 0
@@ -224,7 +231,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

             if fit_start is True and i == 0 and j < activation_potentiation_len:
                 act_pop[j] = activation_potentiation[j]
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], dtype=dtype)
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], normalization=normalization, dtype=dtype)
                 weight_pop[j] = W

             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
@@ -267,10 +274,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

             if batch_size == 1:
-                postfix_dict[f"{data} Accuracy"] = best_acc
-
-                postfix_dict[f"{data} Batch Accuracy"] = acc
-                progress.set_postfix(postfix_dict)
+                postfix_dict[f"{data} Accuracy"] = np.round(best_acc, 4)
+                progress.set_postfix(postfix_dict)

             if show_current_activations:
                 print(f", Current Activations={final_activations}", end='')
@@ -281,11 +286,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])

             if batch_size == 1:
-                postfix_dict[f"{data} Loss"] = train_loss
-
-            else:
-                postfix_dict[f"{data} Batch Loss"] = train_loss
-                progress.set_postfix(postfix_dict)
+                postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+                progress.set_postfix(postfix_dict)
             best_loss = train_loss

             # Update visualizations during training
@@ -358,6 +360,20 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)

+        if batch_size != 1:
+            train_model = evaluate(x_train, y_train, best_weights, final_activations)
+
+            if loss == 'categorical_crossentropy':
+                train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+            else:
+                train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                 y_pred_batch=train_model[get_preds_softmax()])
+
+            postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
+            postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+            progress.set_postfix(postfix_dict)
+
         weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []
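The block added at the end of each generation fixes a reporting gap: when `batch_size != 1`, every candidate is scored on a random batch, so the progress bar would otherwise show only last-batch numbers. The new code re-evaluates the best weights on the full training set and rounds accuracy and loss before display. A rough standalone sketch of the same idea; the softmax and argmax accuracy here are assumptions standing in for `evaluate`, `get_acc()` and the library's loss helpers:

```python
import numpy as np

def full_set_metrics(x_train, y_train, best_weights):
    # Score every training sample with the generation's best weights.
    scores = x_train @ best_weights.T
    # Softmax, then categorical crossentropy and argmax accuracy.
    p = np.exp(scores - scores.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)
    acc = np.mean(p.argmax(axis=1) == y_train.argmax(axis=1))
    loss = -np.mean(np.sum(y_train * np.log(p + 1e-12), axis=1))
    return np.round(acc, 4), np.round(loss, 4)
```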
{pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/plan_cuda.py

@@ -44,6 +44,7 @@ def fit(
     y_train,
     activation_potentiation=['linear'],
     W=None,
+    normalization=False,
     dtype=cp.float32
 ):
     """
@@ -57,9 +58,11 @@ def fit(

     activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)

-    W (cupy.ndarray): If you want to re-continue or update model
+    W (cupy.ndarray, optional): If you want to re-continue or update model

-    dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+    normalization (bool, optional): Normalization may solves overflow problem. Default: False
+
+    dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

     Returns:
         cupyarray: (Weight matrix).
@@ -68,16 +71,19 @@ def fit(

     if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")

-    LTPW = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
+    weight = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

-    x_train = apply_activation(x_train, activation_potentiation)
-
+    if normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+    elif normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+    else: raise ValueError('normalization parameter only be True or False')
+
+    weight += y_train.T @ x_train

-    return normalization(LTPW, dtype=dtype)
+    return normalization(weight, dtype=dtype)


 def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
-            neural_web_history=False, show_current_activations=False,
+            neural_web_history=False, show_current_activations=False, normalization=False,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None,
             start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
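The CUDA `fit` mirrors the NumPy version line for line, only on CuPy arrays, so the same accumulate-then-normalize update runs on the GPU. A minimal sketch (requires a CUDA device and the `cupy` package; `normalize_weights` is again a stand-in, not the library helper):

```python
import cupy as cp

def normalize_weights(w, dtype=cp.float32):
    # Stand-in for data_operations_cuda.normalization (assumed scaling).
    return (w / cp.max(cp.abs(w))).astype(dtype, copy=False)

x_train = cp.random.rand(100, 64, dtype=cp.float32)
y_train = cp.eye(4, dtype=cp.float32)[cp.random.randint(0, 4, 100)]

weight = cp.zeros((y_train.shape[1], x_train.shape[1]), dtype=cp.float32)
weight += y_train.T @ x_train   # same one-pass update as the NumPy fit()
weight = normalize_weights(weight)
```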
@@ -130,6 +136,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

+    normalization (bool, optional): Normalization may solves overflow problem. Default: False
+
     interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.

     target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
@@ -192,7 +200,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
     if batch_size == 1:
         ncols = 76
     else:
-        ncols =
+        ncols = 49

     # Initialize variables
     best_acc = 0
@@ -234,7 +242,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

             if fit_start is True and i == 0 and j < activation_potentiation_len:
                 act_pop[j] = activation_potentiation[j]
-                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], dtype=dtype)
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], normalization=normalization, dtype=dtype)
                 weight_pop[j] = W

             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
@@ -277,10 +285,8 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
             final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

             if batch_size == 1:
-                postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 3)
-
-                postfix_dict[f"{data} Batch Accuracy"] = cp.round(best_acc, 3)
-                progress.set_postfix(postfix_dict)
+                postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 4)
+                progress.set_postfix(postfix_dict)

             if show_current_activations:
                 print(f", Current Activations={final_activations}", end='')
@@ -291,12 +297,9 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                 train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])

             if batch_size == 1:
-                postfix_dict[f"{data} Loss"] = cp.round(train_loss, 3)
+                postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
                 best_loss = train_loss
-
-                postfix_dict[f"{data} Batch Loss"] = cp.round(train_loss, 3)
-                progress.set_postfix(postfix_dict)
-                best_loss = train_loss
+                progress.set_postfix(postfix_dict)

             # Update visualizations during training
             if show_history:
@@ -368,6 +371,18 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)

+        if batch_size != 1:
+            train_model = evaluate(x_train, y_train, best_weights, final_activations)
+
+            if loss == 'categorical_crossentropy':
+                train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=train_model[get_preds_softmax()])
+            else:
+                train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=train_model[get_preds_softmax()])
+
+            postfix_dict[f"{data} Accuracy"] = cp.round(train_model[get_acc()], 4)
+            postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
+            progress.set_postfix(postfix_dict)
+
         weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []
{pyerualjetwork-4.3.8.dev15 → pyerualjetwork-4.3.9b1}/pyerualjetwork/planeat_cuda.py

@@ -19,7 +19,6 @@ import math


 ### LIBRARY IMPORTS ###
-from .plan_cuda import feed_forward
 from .data_operations_cuda import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
@@ -399,7 +398,7 @@ def evolver(weights,
     return weights, activation_potentiations


-def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dtype=cp.float32):
+def evaluate(x_population, weights, activation_potentiations):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -414,64 +413,29 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dty
        activation_potentiations (list or str): A list where each entry represents an activation function
                                                or a potentiation strategy applied to each genome. If only one
                                                activation function is used, this can be a single string.
-
-       rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
-                                 Default is False.
-
-
-       dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
     Returns:
        list: A list of outputs corresponding to each genome in the population after applying the respective
              activation function and weights.
-
-   Notes:
-       - If `rl_mode` is True:
-           - Accepts x_population is a single genom
-           - The inputs are flattened, and the activation function is applied across the single genom.
-
-       - If `rl_mode` is False:
-           - Accepts x_population is a list of genomes
-           - Each genome is processed individually, and the results are stored in the `outputs` list.
-
-       - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
-
    Example:
        ```python
-       outputs = evaluate(x_population, weights, activation_potentiations, rl_mode=False)
+       outputs = evaluate(x_population, weights, activation_potentiations)
        ```

    - The function returns a list of outputs after processing the population, where each element corresponds to
      the output for each genome in `x_population`.
    """
-
-   ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
-
-   ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
    ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:

-   if rl_mode is True:
-
-       Input = Input.ravel()
-
-       if isinstance(activation_potentiations, str):
-           activation_potentiations = [activation_potentiations]
-
-       outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
-
+   if isinstance(activation_potentiations, str):
+       activation_potentiations = [activation_potentiations]
    else:
-
-       for i, genome in enumerate(x_population):
+       activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]

-
-
+   x_population = apply_activation(x_population, activation_potentiations)
+   result = x_population @ weights.T

-
-           activation_potentiations[i] = [activation_potentiations[i]]
-
-           outputs[i] = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations[i], w=weights[i])
-
-   return outputs
+   return result


 def cross_over(first_parent_W,
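The heart of the new `evaluate` is that the per-genome `feed_forward` loop is replaced by applying activations to the whole population and taking one matrix product, `x_population @ weights.T`. A toy NumPy illustration of why a batched product matches a per-sample loop for a single weight matrix (shapes here are illustrative assumptions; the real population layout may differ):

```python
import numpy as np

rng = np.random.default_rng(0)
pop_inputs = rng.random((8, 16), dtype=np.float32)  # 8 genomes' inputs, 16 features each
weights = rng.random((4, 16), dtype=np.float32)     # one 4-output weight matrix

# Loop form: one matrix-vector product per genome.
looped = np.stack([pop_inputs[i] @ weights.T for i in range(8)])

# Batched form: a single matrix-matrix product.
batched = pop_inputs @ weights.T

assert np.allclose(looped, batched)
```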
@@ -757,7 +721,7 @@ def mutation(weight,
             max_threshold = len(activations)

             new_threshold = threshold
-
+
             except_this = ['spiral', 'circular']
             all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED

{pyerualjetwork-4.3.8.dev15/pyerualjetwork_afterburner → pyerualjetwork-4.3.9b1/pyerualjetwork}/visualizations.py

@@ -323,7 +323,8 @@ def draw_activations(x_train, activation):
     except:
         print('\rWARNING: error in drawing some activation.', end='')
         return x_train
-
+
+
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):

     from .metrics import metrics, confusion_matrix, roc_curve
@@ -447,8 +448,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
         axs[1,1].set_title('Decision Boundary')

     except Exception as e:
-
-        print(f"Hata oluştu: {e}")
+        print(f"Error: {e}")

     plt.show()

@@ -614,9 +614,9 @@ def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=F

     fig1.suptitle(suptitle_info, fontsize=16)

-
+""" DISABLED
 def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
-
+
     from .data_operations import find_closest_factors
     visualization_objects = {}

@@ -649,32 +649,33 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_trai
     })

     return visualization_objects
-
+"""

-
+""" DISABLED
 def update_neural_web_for_fit(W, ax, G, artist):
-
-    The function `update_neural_web_for_fit` updates a neural web visualization for fitting.
-    """
+
     art5_1, art5_2, art5_3 = draw_neural_web(W=W, ax=ax, G=G, return_objs=True)
     art5_list = [art5_1] + [art5_2] + list(art5_3.values())
     artist.append(art5_list)
-
-
+"""
+
+""" DISABLED
 def update_weight_visualization_for_fit(ax, LTPW, artist2):
-
+
     art2 = ax.imshow(LTPW, interpolation='sinc', cmap='viridis')
     artist2.append([art2])
+"""

-
+""" DISABLED
 def update_decision_boundary_for_fit(ax, x_val, y_val, activation_potentiation, LTPW, artist1):
-
+
     art1_1, art1_2 = plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW, artist=artist1, ax=ax)
     artist1.append([*art1_1.collections, art1_2])
+"""

-
+""" DISABLED
 def update_validation_history_for_fit(ax, val_list, artist3):
-
+
     period = list(range(1, len(val_list) + 1))
     art3 = ax.plot(
         period,
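These visualization helpers are disabled by turning each definition into a bare string literal: the `""" DISABLED` opener and the closing `"""` make the whole block an unassigned module-level string, so the function names are never defined and any remaining caller would raise `NameError`. A tiny self-contained demonstration of the pattern:

```python
""" DISABLED
def update_weight_visualization_for_fit(ax, LTPW, artist2):
    art2 = ax.imshow(LTPW, interpolation='sinc', cmap='viridis')
    artist2.append([art2])
"""

# The block above is just an unused string literal, so the name is undefined:
print('update_weight_visualization_for_fit' in dir())  # False
```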
@@ -691,19 +692,21 @@ def update_validation_history_for_fit(ax, val_list, artist3):
     ax.set_ylabel('Validation Accuracy')
     ax.set_ylim([0, 1])
     artist3.append(art3)
-
-
+"""
+
+""" DISABLED
 def display_visualization_for_fit(fig, artist_list, interval):
-
+
     ani = ArtistAnimation(fig, artist_list, interval=interval, blit=True)
     return ani
-
+"""
+
 def show():
     plt.tight_layout()
     plt.show()

 def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
-
+
     from .data_operations import find_closest_factors
     viz_objects = {}

@@ -745,7 +748,7 @@ def initialize_visualization_for_learner(show_history, neurons_history, neural_w
     return viz_objects

 def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_acc_per_depth_list, x_train, final_activations):
-
+
     if 'history' not in viz_objects:
         return

@@ -772,7 +775,7 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
     hist['artist3'].append(art3)

 def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval):
-
+
     if 'history' in viz_objects:
         hist = viz_objects['history']
         for _ in range(30):