pyerualjetwork-5b0-py3-none-any.whl → pyerualjetwork-5b2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/neu.py +1 -3
- pyerualjetwork/neu_cuda.py +32 -32
- {pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/METADATA +1 -1
- {pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/RECORD +7 -7
- {pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -38,7 +38,7 @@ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_A
 - Contact: tchasancan@gmail.com
 """
 
-__version__ = "5b0"
+__version__ = "5b2"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
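The only functional change in this file is the version bump, visible at runtime as:

    import pyerualjetwork
    print(pyerualjetwork.__version__)  # prints '5b2' after this upgrade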
pyerualjetwork/neu.py
CHANGED
@@ -33,8 +33,6 @@ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_A
 import numpy as np
 import copy
 
-import neu
-
 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
 from .data_operations import normalization, batcher

@@ -180,7 +178,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
     if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
 
-    if neurons …
+    if neurons != []:
         fit_start = False
         weight_evolve = True
         is_mlp = True
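The rewritten guard is the PLAN/MLP dispatch point. A minimal standalone restatement of what the three flags do (the neuron counts are illustrative, not from this diff):

    # Dispatch rule from the hunk above: a non-empty `neurons` list switches
    # learn() from the PLAN architecture to an evolved MLP.
    neurons = [64, 32]        # two hidden layers, illustrative sizes
    if neurons != []:
        fit_start = False     # skip the PLAN warm-start over candidate activations
        weight_evolve = True  # weights come from the genetic optimizer instead
        is_mlp = True         # evaluate() will walk the per-layer weight list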
pyerualjetwork/neu_cuda.py
CHANGED
@@ -59,7 +59,7 @@ bar_format_learner = loading_bars()[1]
 def plan_fit(
     x_train,
     y_train,
-    …
+    activations=['linear'],
     W=None,
     auto_normalization=False,
     dtype=cp.float32
@@ -70,7 +70,7 @@ def plan_fit(
    plan_fit Args:
         :param (aray-like[cupy]) x_train: (aray-like[cupy]): List or cupy array of input data.
         :param (aray-like[cupy]) y_train: List or cupy array of target labels. (one hot encoded)
-        :param (list) …
+        :param (list) activations: For deeper PLAN networks, activation function parameters. For more information please run this code: nn.activations_list() default: [None] (optional)
         W (cupy.ndarray, optional): If you want to re-continue or update model
         auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
         dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
@@ -84,8 +84,8 @@ def plan_fit(
 
     weight = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
 
-    if auto_normalization is True: x_train = normalization(apply_activation(x_train, …
-    elif auto_normalization is False: x_train = apply_activation(x_train, …
+    if auto_normalization is True: x_train = normalization(apply_activation(x_train, activations))
+    elif auto_normalization is False: x_train = apply_activation(x_train, activations)
     else: raise ValueError('normalization parameter only be True or False')
 
     weight += y_train.T @ x_train
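For orientation, a minimal NumPy sketch of the accumulation rule visible in this hunk; plan_fit_sketch is a hypothetical stand-in for the CuPy implementation, with the 'linear' activation treated as the identity (so the apply_activation step is skipped):

    import numpy as np

    def plan_fit_sketch(x_train, y_train, W=None, dtype=np.float32):
        # one weight row per class, one column per flattened input feature
        weight = np.zeros((y_train.shape[1], x_train.shape[1]), dtype=dtype) if W is None else W
        # with activations=['linear'] fitting reduces to accumulating
        # class-conditional feature sums
        weight += y_train.T @ x_train
        return weight

    x = np.array([[1., 0.], [0., 1.]], dtype=np.float32)
    y = np.array([[1., 0.], [0., 1.]], dtype=np.float32)  # one-hot labels
    W = plan_fit_sketch(x, y)  # each row is the feature sum of one class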
@@ -96,7 +96,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
           weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
           neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
           interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
-          start_this_act=None, start_this_W=None, neurons=[], …
+          start_this_act=None, start_this_W=None, neurons=[], activations=[], dtype=cp.float32, memory='gpu'):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation(or activation function) & weight values for the given dataset.
@@ -146,7 +146,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
     :param start_this_W: (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
     :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Number of elements --> Layer count. Default: [] (No hidden layer) --> architecture setted to PLAN, if not --> architecture setted to MLP.
-    :param …
+    :param activations: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
     :param dtype: (cupy.dtype): Data type for the Weight matrices. np.float32 by default. Example: cp.float64 or cp.float16.
     :param memory: (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.
 
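Taken together with the neurons parameter documented just above, a hypothetical call sketch; my_optimizer stands in for whatever evolver callable the optimizer parameter expects (not shown in this diff), and x_train/y_train are assumed prepared arrays with one-hot labels:

    import cupy as cp
    from pyerualjetwork.neu_cuda import learn

    model = learn(x_train, y_train, my_optimizer,
                  neurons=[32, 32],                  # two hidden layers -> MLP path
                  activations=['linear', 'linear'],  # one per hidden layer, the documented default
                  gen=10,
                  dtype=cp.float32, memory='gpu')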
@@ -159,15 +159,15 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     data = 'Train'
 
     except_this = ['spiral', 'circular']
-    …
-    …
+    activations = [item for item in all_activations() if item not in except_this]
+    activations_len = len(activations)
 
-    if pop_size is None: pop_size = …
+    if pop_size is None: pop_size = activations_len
     y_train = optimize_labels(y_train, cuda=True)
 
-    if pop_size < …
+    if pop_size < activations_len: raise ValueError(f"pop_size must be higher or equal to {activations_len}")
 
-    if gen is None: gen = …
+    if gen is None: gen = activations_len
 
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=x_train.dtype)
@@ -187,7 +187,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
     if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
 
-    if neurons …
+    if neurons != []:
         fit_start = False
         weight_evolve = True
         is_mlp = True
@@ -208,10 +208,10 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     loss_list = []
     target_pop = []
 
-    progress = initialize_loading_bar(total=…
+    progress = initialize_loading_bar(total=activations_len, desc="", ncols=79, bar_format=bar_format_learner)
 
-    if fit_start is False or pop_size > …
-        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, …
+    if fit_start is False or pop_size > activations_len:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, activations=activations, population_size=pop_size, dtype=dtype)
 
     else:
         weight_pop = [0] * pop_size
@@ -237,18 +237,18 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
             x_train_batch = cp.array(x_train_batch, dtype=x_train_batch.dtype, copy=False)
             y_train_batch = cp.array(y_train_batch, dtype=y_train.dtype)
 
-            if fit_start is True and i == 0 and j < …
+            if fit_start is True and i == 0 and j < activations_len:
                 if start_this_act is not None and j == 0:
                     pass
                 else:
-                    act_pop[j] = …
-                W = plan_fit(x_train_batch, y_train_batch, …
+                    act_pop[j] = activations[j]
+                W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
                 weight_pop[j] = W
 
             if weight_evolve is False:
-                weight_pop[j] = plan_fit(x_train_batch, y_train_batch, …
+                weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
 
-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], …
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
             acc = model[get_acc()]
 
             if loss == 'categorical_crossentropy':
@@ -306,7 +306,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weight,
-                                   …
+                                   activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                       y_pred_batch=train_model[get_preds_softmax()])
@@ -326,7 +326,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
         if target_loss is not None and best_loss <= target_loss:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weight,
-                                   …
+                                   activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
 
             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -378,7 +378,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weight,
-                                       …
+                                       activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -399,7 +399,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     # Final evaluation
     progress.close()
     train_model = evaluate(x_train, y_train, W=best_weight,
-                           …
+                           activations=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -420,7 +420,7 @@ def evaluate(
     x_test,
     y_test,
     W,
-    …
+    activations=['linear'],
     auto_normalization=False,
     is_mlp=False
 ) -> tuple:
@@ -434,7 +434,7 @@ def evaluate(
 
     W (cp.ndarray): Neural net weight matrix.
 
-    …
+    activations (list): Activation list. Default = ['linear'].
 
     auto_normalization (bool, optional): Normalization for x_test ? Default = False.
 
@@ -446,15 +446,15 @@ def evaluate(
 
     if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
 
-    if isinstance(…
-        …
-    elif isinstance(…
-        …
+    if isinstance(activations, str):
+        activations = [activations]
+    elif isinstance(activations, list):
+        activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
 
     if is_mlp:
         layer = x_test
         for i in range(len(W)):
-            if i != len(W) - 1 and i != 0: layer = apply_activation(layer, …
+            if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
 
             layer = layer @ W[i].T
 
@@ -462,7 +462,7 @@ def evaluate(
 
     else:
 
-        x_test = apply_activation(x_test, …
+        x_test = apply_activation(x_test, activations)
         result = x_test @ W.T
 
     max_vals = cp.max(result, axis=1, keepdims=True)
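A minimal NumPy sketch of the evaluate() forward pass shown in the last three hunks; names ending in _sketch are hypothetical stand-ins for the CuPy implementation, and 'linear' activations are treated as the identity:

    import numpy as np

    def softmax_rows(z):
        z = z - np.max(z, axis=1, keepdims=True)  # same shift as the max_vals line above
        e = np.exp(z)
        return e / e.sum(axis=1, keepdims=True)

    def evaluate_sketch(x_test, W, activations=['linear'], is_mlp=False):
        # activations kept for signature parity; 'linear' is the identity here
        if is_mlp:
            layer = x_test
            for i in range(len(W)):  # walk the per-layer weight list
                layer = layer @ W[i].T
            result = layer
        else:
            result = x_test @ W.T  # PLAN path: a single matmul
        return softmax_rows(result)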
{pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5b0
+Version: 5b2
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=…
+pyerualjetwork/__init__.py,sha256=hfJ9eS6MWjT5xcxtk3mDcJhmsG-XwOa5e8rta5QqbIQ,2631
 pyerualjetwork/activation_functions.py,sha256=X7Kv8qv8oZq8hvTdUiV-GkFjKHRlKIQypRPXh6gdkm4,7614
 pyerualjetwork/activation_functions_cuda.py,sha256=pefklsl9QuSVbKwiUUHeF_ExN0bICH7QIF1MfoMU40Q,7665
 pyerualjetwork/data_operations.py,sha256=LKmLfl43zSCCuP2cWkBM-D6GtlhxXQggsNvZNUwHDe4,16347
@@ -14,12 +14,12 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=F5qYAU578yrS1cUCwsaPJnqgfEAEUD_50vspakypTGY,15971
 pyerualjetwork/model_operations_cuda.py,sha256=lgYAEGUERTMU7TEWRVKaa3yk_IOt4Jo9RJP9lDsNREU,17206
-pyerualjetwork/neu.py,sha256=…
-pyerualjetwork/neu_cuda.py,sha256=…
+pyerualjetwork/neu.py,sha256=bNl8nnL5R3WS8vNWxFbVzeX9eeGgownHdebbsKKNfvU,24948
+pyerualjetwork/neu_cuda.py,sha256=0hUCQ02tLFg1cGuqOxT3jfBx3W28hIL6nhEu4m2LeY8,25999
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
 pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
 pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
-pyerualjetwork-5b0.dist-info/…
-pyerualjetwork-5b0.dist-info/…
-pyerualjetwork-5b0.dist-info/…
-pyerualjetwork-5b0.dist-info/…
+pyerualjetwork-5b2.dist-info/METADATA,sha256=eS1VixnfYt_TbVFoqu6HJVf0wHrGyc2Ups5Cr-D8Q_4,7503
+pyerualjetwork-5b2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5b2.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5b2.dist-info/RECORD,,
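The sha256= fields in RECORD are unpadded urlsafe base64 digests (standard wheel RECORD format). A small sketch for checking an installed file against its entry (the path is illustrative):

    import base64
    import hashlib

    def record_digest(path):
        # RECORD stores sha256 digests as unpadded urlsafe base64
        digest = hashlib.sha256(open(path, 'rb').read()).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

    # e.g. record_digest('pyerualjetwork/ui.py') should return
    # 'JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw' for the 5b2 wheel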
{pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/WHEEL
File without changes
{pyerualjetwork-5b0.dist-info → pyerualjetwork-5b2.dist-info}/top_level.txt
File without changes