pyerualjetwork 5.31a1__py3-none-any.whl → 5.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/cpu/activation_functions.py +1 -0
- pyerualjetwork/cpu/data_ops.py +2 -2
- pyerualjetwork/cpu/ene.py +22 -22
- pyerualjetwork/cpu/model_ops.py +4 -4
- pyerualjetwork/cpu/nn.py +22 -12
- pyerualjetwork/cuda/activation_functions.py +1 -0
- pyerualjetwork/cuda/ene.py +22 -22
- pyerualjetwork/cuda/model_ops.py +4 -4
- pyerualjetwork/cuda/nn.py +14 -3
- {pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/METADATA +11 -14
- {pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/RECORD +14 -14
- {pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """

-__version__ = "5.31a1"
+__version__ = "5.33"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
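Only the version string changes in this file. For a quick post-upgrade sanity check, a minimal sketch (both attributes appear in the module above):

```python
# Confirm the installed build matches the 5.33 wheel diffed here.
import pyerualjetwork

print(pyerualjetwork.__version__)  # expected: "5.33"
print(pyerualjetwork.__update__)   # changelog and documentation pointers
```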
pyerualjetwork/cpu/data_ops.py
CHANGED
@@ -284,9 +284,9 @@ def synthetic_augmentation(x, y, dtype=np.float32):
     """
     Generates synthetic examples to balance classes with fewer examples using numpy.
     Args:
-
+    x: numpy array format

-
+    y: numpy array format (one-hot encoded)

     dtype (numpy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.

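The touched docstring belongs to synthetic_augmentation, which oversamples classes that have fewer examples. A hedged usage sketch built only from the signature and argument notes above; the (x, y) return pair is an assumption:

```python
import numpy as np
from pyerualjetwork.cpu import data_ops

# Imbalanced toy set: 10 samples of class 0 vs. 2 of class 1, one-hot labels.
x = np.random.rand(12, 4).astype(np.float32)
y = np.zeros((12, 2), dtype=np.float32)
y[:10, 0] = 1
y[10:, 1] = 1

x_bal, y_bal = data_ops.synthetic_augmentation(x, y)  # assumed to return the balanced pair
```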
pyerualjetwork/cpu/ene.py
CHANGED
@@ -138,7 +138,7 @@ def evolver(weights,
             weight_mutate_threshold=16,
             weight_mutate_prob=1,
             is_mlp=False,
-            save_best_genome=
+            save_best_genome=True,
             dtype=np.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -157,7 +157,7 @@ def evolver(weights,
     what_gen (int): The current generation number, used for informational purposes or logging.

     fitness (numpy.ndarray): A 1D array containing the fitness values of each genome.
-    The array is used to rank the genomes based on their performance.
+    The array is used to rank the genomes based on their performance. ENE maximizes or minimizes this fitness based on the `target_fitness` parameter.

     weight_evolve (bool, optional): Are weights to be evolves or just activation combinations Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the resulting weights will be exactly the same.

@@ -227,7 +227,7 @@ def evolver(weights,

     is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

-    save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default:
+    save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. (index of best individual: 0) Default: True

     dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
     Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
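Both the new default and the expanded docstring pin elitism on: the best genome of each generation is written back to index 0. A hedged sketch of a caller relying on that contract; the evolver(weights, activations, generation, fitness) positional order, the (weights, activations) return pair, and ene.define_genomes (which nn.py calls further down this diff) are assumptions, and score_genome stands in for your own evaluation:

```python
import numpy as np
from pyerualjetwork.cpu import ene

# Population size must be even, per the ValueError shown below.
weights, activations = ene.define_genomes(input_shape=8, output_shape=2, population_size=20)

for generation in range(10):
    fitness = np.array([score_genome(W, act) for W, act in zip(weights, activations)])
    weights, activations = ene.evolver(weights, activations, generation, fitness)

    # save_best_genome=True (the new default): the elite survives at index 0.
    elite_W, elite_act = weights[0], activations[0]
```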
@@ -314,7 +314,7 @@ def evolver(weights,
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

-    if weight_evolve is False: origin_weights =
+    if weight_evolve is False: origin_weights = copy.deepcopy(weights)

     if is_mlp:
         activation_mutate_add_prob = 0
@@ -336,11 +336,11 @@ def evolver(weights,

     good_weights = weights[slice_center:]
     bad_weights = weights[:slice_center]
-    best_weight =
+    best_weight = copy.deepcopy(good_weights[-1])

     good_activations = list(activations[slice_center:])
     bad_activations = list(activations[:slice_center])
-    best_activations = good_activations[-1]
+    best_activations = copy.deepcopy(good_activations[-1]) if isinstance(good_activations[-1], list) else good_activations[-1]


     ### ENE IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:
@@ -353,23 +353,23 @@ def evolver(weights,
     best_fitness = normalized_fitness[-1]
     epsilon = np.finfo(float).eps

-    child_W =
-    child_act =
+    child_W = copy.deepcopy(bad_weights)
+    child_act = copy.deepcopy(bad_activations)

-    mutated_W =
-    mutated_act =
+    mutated_W = copy.deepcopy(bad_weights)
+    mutated_act = copy.deepcopy(bad_activations)


     for i in range(len(bad_weights)):

         if policy == 'aggressive':
-            first_parent_W = best_weight
-            first_parent_act = best_activations
+            first_parent_W = copy.deepcopy(best_weight)
+            first_parent_act = copy.deepcopy(best_activations)
             first_parent_fitness = best_fitness

         elif policy == 'explorer':
-            first_parent_W = good_weights[i]
-            first_parent_act = good_activations[i]
+            first_parent_W = copy.deepcopy(good_weights[i])
+            first_parent_act = copy.deepcopy(good_activations[i])
             first_parent_fitness = normalized_fitness[len(good_weights) + i]

         else: raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")
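Nearly every 5.33 change in this file swaps a plain assignment for copy.deepcopy. The motivation is the standard Python aliasing pitfall: assigning a list of numpy arrays copies references, so mutating a child genome would silently mutate its parent too. A standalone illustration (not pyerualjetwork code):

```python
import copy
import numpy as np

parent = [np.zeros((2, 2))]

alias = parent                 # plain assignment: same underlying array objects
alias[0] += 1                  # "mutating the child" ...
print(parent[0][0, 0])         # 1.0 -- the parent genome was corrupted too

parent = [np.zeros((2, 2))]
child = copy.deepcopy(parent)  # independent copy, as evolver() now makes
child[0] += 1
print(parent[0][0, 0])         # 0.0 -- the parent stays intact
```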
@@ -475,8 +475,8 @@ def evolver(weights,
     activations = child_act + mutated_act

     if save_best_genome:
-        weights[0] = best_weight
-        activations[0] = best_activations
+        weights[0] = copy.deepcopy(best_weight)
+        activations[0] = copy.deepcopy(best_activations)

     ### INFO PRINTING CONSOLE

@@ -501,7 +501,7 @@ def evolver(weights,
     print(" ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
     print(" ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
     print(" FITNESS BIAS: ", str(fitness_bias))
-    print(" SAVE BEST GENOME: ", str(save_best_genome) + '\n')
+    print(" SAVE BEST GENOME: ", str(save_best_genome) + " (index of best individual: 0)" + '\n')


     print("*** Performance ***")
@@ -664,18 +664,18 @@ def cross_over(first_parent_W,
     decision = dominant_parent_selection(bad_genomes_selection_prob)

     if decision == 'first_parent':
-        dominant_parent_W =
+        dominant_parent_W = copy.deepcopy(first_parent_W)
         dominant_parent_act = first_parent_act

-        undominant_parent_W =
+        undominant_parent_W = copy.deepcopy(second_parent_W)
         undominant_parent_act = second_parent_act
         succes = second_parent_fitness + epsilon

     elif decision == 'second_parent':
-        dominant_parent_W =
+        dominant_parent_W = copy.deepcopy(second_parent_W)
         dominant_parent_act = second_parent_act

-        undominant_parent_W =
+        undominant_parent_W = copy.deepcopy(first_parent_W)
         undominant_parent_act = first_parent_act
         succes = first_parent_fitness + epsilon

@@ -713,7 +713,7 @@ def cross_over(first_parent_W,
     if isinstance(dominant_parent_act, str): dominant_parent_act = [dominant_parent_act]
     if isinstance(undominant_parent_act, str): undominant_parent_act = [undominant_parent_act]

-    child_act = list(
+    child_act = list(copy.deepcopy(dominant_parent_act))

     activation_selection_add_prob = 1 - activation_selection_add_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
     potential_activation_selection_add = random.uniform(0, 1)
pyerualjetwork/cpu/model_ops.py
CHANGED
@@ -385,7 +385,7 @@ def predict_from_storage(Input, model_name, model_path=''):
     from storage

     Args:
-    Input (list or ndarray): Input data for the model
+    Input (list or ndarray): Input data for the model.

     model_name (str): Name of the model.

@@ -459,7 +459,7 @@ def reverse_predict_from_storage(output, model_name, model_path=''):
     reverse prediction function from storage
     Args:

-    output (list or ndarray): output layer for the model
+    output (list or ndarray): output layer for the model.

     model_name (str): Name of the model.

@@ -489,7 +489,7 @@ def predict_from_memory(Input, W, scaler_params=None, activations=['linear'], ac
     from memory.

     Args:
-    Input (list or ndarray): Input data for the model
+    Input (list or ndarray): Input data for the model.

     W (list of ndarrays): Weights of the model.

@@ -563,7 +563,7 @@ def reverse_predict_from_memory(output, W):

     Args:

-    output (list or ndarray): output layer for the model
+    output (list or ndarray): output layer for the model.

     W (list of ndarrays): Weights of the model.

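All four hunks above are docstring-only, but they name this module's prediction entry points. A hedged sketch of the in-memory pair; x_test, W, and final_activations are placeholders a trained model would provide, and only the parameter names visible in the hunk headers are relied on:

```python
from pyerualjetwork.cpu import model_ops

# W: list of ndarrays; final_activations: the trained activation list.
outputs = model_ops.predict_from_memory(x_test, W, activations=final_activations)

# The reverse path maps an output layer back toward the input space.
reconstructed = model_ops.reverse_predict_from_memory(outputs, W)
```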
pyerualjetwork/cpu/nn.py
CHANGED
@@ -4,7 +4,7 @@

 NN (Neural Networks) on CPU
 ============================
-This module hosts functions for training and evaluating artificial neural networks on CPU for labeled classification tasks
+This module hosts functions for training and evaluating artificial neural networks on CPU for labeled classification tasks.

 Currently, 3 types of models can be trained:

@@ -132,18 +132,29 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
     * This function also able to train classic MLP model architectures.
     * And my newest innovative architecture: PTNN (Potentiation Transfer Neural Network).

+    Examples:
+
+    This creates a PLAN model:
+        - ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=True) ```
+
+    This creates a MLP model(with 2 hidden layer):
+        - ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=False, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
+    This creates a PTNN model(with 2 hidden layer & 1 aggregation layer(comes with PLAN)):
+        - ```learn(x_train, y_train, optimizer, pop_size=100, gen=[10, 100], fit_start=True, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
     :Args:
     :param x_train: (array-like): Training input data.
     :param y_train: (array-like): Labels for training data. one-hot encoded.
-    :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.
+    :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.cpu.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    optimizer = lambda *args, **kwargs:
+    optimizer = lambda *args, **kwargs: ene.evolver(*args,
         activation_add_prob=0.05,
         strategy='aggressive',
         policy='more_selective',
         **kwargs)

-    model =
+    model = nn.learn(x_train,
         y_train,
         optimizer,
         fit_start=True,
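The docstring fragments above assemble into one complete call. A hedged end-to-end sketch under the new pyerualjetwork.cpu layout; random data stands in for a real dataset, and policy is set to 'aggressive', one of the two values evolver's own error message accepts (the docstring's 'more_selective' example value does not match that message):

```python
import numpy as np
from pyerualjetwork.cpu import nn, ene

x_train = np.random.rand(100, 16).astype(np.float32)
y_train = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 100)]  # one-hot labels

# learn() drives ene.evolver through this wrapper: it supplies the positional
# arguments each generation while our keyword hyperparameters stay pinned.
optimizer = lambda *args, **kwargs: ene.evolver(*args,
                                                activation_add_prob=0.05,
                                                policy='aggressive',
                                                **kwargs)

model = nn.learn(x_train, y_train, optimizer, gen=10, pop_size=50, fit_start=True)
```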
@@ -284,15 +295,14 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size

     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons_copy, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)

-    # 0 indexed individual will keep PLAN's learned informations and in later generations it will share other individuals.
-    for
-
+    # 0 indexed individual will keep PLAN's learned informations and in later generations it will share with other individuals.
+    for layer in range(1, len(mlp_W)):
+        row_shape, col_shape = mlp_W[layer].shape

-    identity_matrix = np.eye(
-
-
-
-    weight_pop[l][0] = np.copy(best_weight)
+        identity_matrix = np.eye(row_shape, col_shape)
+        mlp_W[layer] = identity_matrix
+
+    mlp_W[0] = plan_W

     best_weight = np.array(weight_pop[0], dtype=object)
     final_activations = act_pop[0]
pyerualjetwork/cuda/ene.py
CHANGED
@@ -138,7 +138,7 @@ def evolver(weights,
             weight_mutate_threshold=16,
             weight_mutate_prob=1,
             is_mlp=False,
-            save_best_genome=
+            save_best_genome=True,
             dtype=cp.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -157,7 +157,7 @@ def evolver(weights,
     what_gen (int): The current generation number, used for informational purposes or logging.

     fitness (cupy.ndarray): A 1D array containing the fitness values of each genome.
-    The array is used to rank the genomes based on their performance.
+    The array is used to rank the genomes based on their performance. ENE maximizes or minimizes this fitness based on the `target_fitness` parameter.

     weight_evolve (bool, optional): Are weights to be evolves or just activation combinations Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the resulting weights will be exactly the same.

@@ -227,7 +227,7 @@ def evolver(weights,

     is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

-    save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default:
+    save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. (index of best individual: 0) Default: True

     dtype (cupy.dtype): Data type for the arrays. Default: cp.float32.
     Example: cp.float64 or cp.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
@@ -313,7 +313,7 @@ def evolver(weights,
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

-    if weight_evolve is False: origin_weights =
+    if weight_evolve is False: origin_weights = copy.deepcopy(weights)

     if is_mlp:
         activation_mutate_add_prob = 0
@@ -345,14 +345,14 @@ def evolver(weights,

     good_weights = weights[slice_center:]
     bad_weights = weights[:slice_center]
-    best_weight =
+    best_weight = copy.deepcopy(good_weights[-1])

     good_activations = list(activations[slice_center:])
     bad_activations = list(activations[:slice_center])
-    best_activations = good_activations[-1]
+    best_activations = copy.deepcopy(good_activations[-1]) if isinstance(good_activations[-1], list) else good_activations[-1]


-    ###
+    ### ENE IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:

     bar_format = loading_bars()[0]

@@ -362,23 +362,23 @@ def evolver(weights,
     best_fitness = normalized_fitness[-1]
     epsilon = cp.finfo(float).eps

-    child_W =
-    child_act =
+    child_W = copy.deepcopy(bad_weights)
+    child_act = copy.deepcopy(bad_activations)

-    mutated_W =
-    mutated_act =
+    mutated_W = copy.deepcopy(bad_weights)
+    mutated_act = copy.deepcopy(bad_activations)


     for i in range(len(bad_weights)):

         if policy == 'aggressive':
-            first_parent_W = best_weight
-            first_parent_act = best_activations
+            first_parent_W = copy.deepcopy(best_weight)
+            first_parent_act = copy.deepcopy(best_activations)
             first_parent_fitness = best_fitness

         elif policy == 'explorer':
-            first_parent_W = good_weights[i]
-            first_parent_act = good_activations[i]
+            first_parent_W = copy.deepcopy(good_weights[i])
+            first_parent_act = copy.deepcopy(good_activations[i])
             first_parent_fitness = normalized_fitness[len(good_weights) + i]

         else: raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")
@@ -493,8 +493,8 @@ def evolver(weights,
     activations = child_act + mutated_act

     if save_best_genome:
-        weights[0] = best_weight
-        activations[0] = best_activations
+        weights[0] = copy.deepcopy(best_weight)
+        activations[0] = copy.deepcopy(best_activations)

     ### INFO PRINTING CONSOLE

@@ -519,7 +519,7 @@ def evolver(weights,
     print(" ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
     print(" ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
     print(" FITNESS BIAS: ", str(fitness_bias))
-    print(" SAVE BEST GENOME: ", str(save_best_genome) + '\n')
+    print(" SAVE BEST GENOME: ", str(save_best_genome) + " (index of best individual: 0)" + '\n')

     print("*** Performance ***")
     print(" MAX FITNESS: ", str(cp.round(max(fitness), 2)))
@@ -682,18 +682,18 @@ def cross_over(first_parent_W,
     decision = dominant_parent_selection(bad_genomes_selection_prob)

     if decision == 'first_parent':
-        dominant_parent_W =
+        dominant_parent_W = copy.deepcopy(first_parent_W)
         dominant_parent_act = first_parent_act

-        undominant_parent_W =
+        undominant_parent_W = copy.deepcopy(second_parent_W)
         undominant_parent_act = second_parent_act
         succes = second_parent_fitness + epsilon

     elif decision == 'second_parent':
-        dominant_parent_W =
+        dominant_parent_W = copy.deepcopy(second_parent_W)
         dominant_parent_act = second_parent_act

-        undominant_parent_W =
+        undominant_parent_W = copy.deepcopy(first_parent_W)
         undominant_parent_act = first_parent_act
         succes = first_parent_fitness + epsilon

pyerualjetwork/cuda/model_ops.py
CHANGED
@@ -406,7 +406,7 @@ def predict_from_storage(Input, model_name, model_path='', dtype=cp.float32):
     from storage

     Args:
-    Input (list or ndarray or cupyarray): Input data for the model
+    Input (list or ndarray or cupyarray): Input data for the model.

     model_name (str): Name of the model.

@@ -484,7 +484,7 @@ def reverse_predict_from_storage(output, model_name, model_path='', dtype=cp.float32):
     reverse prediction function from storage
     Args:

-    output (list or ndarray): output layer for the model
+    output (list or ndarray): output layer for the model.

     model_name (str): Name of the model.

@@ -517,7 +517,7 @@ def predict_from_memory(Input, W, scaler_params=None, activations=['linear'], ac
     from memory.

     Args:
-    Input (list or ndarray): Input data for the model
+    Input (list or ndarray): Input data for the model.

     W (list of ndarrays): Weights of the model.

@@ -585,7 +585,7 @@ def reverse_predict_from_memory(output, W, dtype=cp.float32):

     Args:

-    output (list or ndarray): output layer for the model
+    output (list or ndarray): output layer for the model.

     W (list of ndarrays): Weights of the model.

pyerualjetwork/cuda/nn.py
CHANGED
@@ -127,19 +127,30 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size
     * This function also able to train classic MLP model architectures.
     * And my newest innovative architecture: PTNN (Potentiation Transfer Neural Network).

+    Examples:
+
+    This creates a PLAN model:
+        - ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=True) ```
+
+    This creates a MLP model(with 2 hidden layer):
+        - ```learn(x_train, y_train, optimizer, pop_size=100, gen=100, fit_start=False, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
+    This creates a PTNN model(with 2 hidden layer & 1 aggregation layer(comes with PLAN)):
+        - ```learn(x_train, y_train, optimizer, pop_size=100, gen=[10, 100], fit_start=True, neurons=[64, 64], activation_functions=['tanh', 'tanh']) ```
+
     :Args:
     :param x_train: (array-like): Training input data.
     :param y_train: (array-like): Labels for training data.
-    :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.
+    :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.cuda.ene import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python

-    optimizer = lambda *args, **kwargs:
+    optimizer = lambda *args, **kwargs: ene.evolver(*args,
         activation_add_prob=0.05,
         strategy='aggressive',
         policy='more_selective',
         **kwargs)

-    model =
+    model = nn.learn(x_train,
         y_train,
         optimizer,
         fit_start=True,
{pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.31a1
+Version: 5.33
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -24,17 +24,14 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork

 YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7HbsBpCx2VTJ4SK9wcPyse-EHw

-
+installation:
+'pip install pyerualjetwork'

-
-from pyerualjetwork import
-from pyerualjetwork import
-
-
-from pyerualjetwork import neu_cuda
-from pyerualjetwork import ene_cuda
-from pyerualjetwork import data_operations_cuda
-from pyerualjetwork import model_operations_cuda
+package modules:
+'from pyerualjetwork.cpu import nn, ene, data_ops, model_ops, memory_ops'
+'from pyerualjetwork.cuda import nn, ene, data_ops, model_ops, memory_ops'
+
+please read docstrings.

 PyerualJetwork has Issue Solver. This operation provides users ready-to-use functions to identify potential issues
 caused by version incompatibilities in major updates, ensuring users are not affected by such problems.
@@ -74,10 +71,10 @@ PyerualJetwork is free to use for commercial business and individual users.
 PyerualJetwork ready for both eager execution(like PyTorch) and static graph(like Tensorflow) concepts because PyerualJetwork using only functions.
 For example:

-
+plan_fit function only fits given training data(suitable for dynamic graph) but learn function learns and optimize entire architecture(suitable for static graph). Or more deeper eager executions PyerualJetwork have: cross_over function, mutation function, list of activation functions, loss functions. You can create your unique model architecture. Move your data to GPU or CPU or manage how much should in GPU, Its all up to you.
 <br><br>

-PyerualJetworket includes PLAN, MLP & ENE.<br>
+PyerualJetworket includes PLAN, MLP, PTNN & ENE.<br>

 PLAN VISION:<br>

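The paragraph above contrasts the two call styles. A compact hedged sketch of each; nn.learn follows the docstrings earlier in this diff, while plan_fit's signature is an assumption based only on its name and role in the METADATA text:

```python
import numpy as np
from pyerualjetwork.cpu import nn, ene

x = np.random.rand(60, 8).astype(np.float32)
y = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 60)]

# Static-graph style: one call searches and optimizes the whole architecture.
model = nn.learn(x, y, lambda *a, **kw: ene.evolver(*a, **kw), gen=5, pop_size=20, fit_start=True)

# Eager style: fit only the given data, then compose the rest yourself
# (signature assumed):
# W = nn.plan_fit(x, y)
```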
@@ -122,6 +119,6 @@ HOW DO I IMPORT IT TO MY PROJECT?

 Anaconda users can access the 'Anaconda Prompt' terminal from the Start menu and add the necessary library modules to the Python module search queue by typing "pip install pyerualjetwork" and pressing enter. If you are not using Anaconda, you can simply open the 'cmd' Windows command terminal from the Start menu and type "pip install PyerualJetwork". (Visual Studio Code reccomended) After installation, it's important to periodically open the terminal of the environment you are using and stay up to date by using the command "pip install PyerualJetwork --upgrade".

-After installing the module using "pip" you can now call the library module in your project environment. Use: “from pyerualjetwork import
+After installing the module using "pip" you can now call the library module in your project environment. Use: “from pyerualjetwork.cpu import nn. Now, you can call the necessary functions from the nn module.

 The PLAN algorithm & ENE algorithm will not be explained in this document. This document focuses on how professionals can integrate and use PyerualJetwork in their systems. However, briefly, the PLAN algorithm can be described as a classification algorithm. PLAN algorithm achieves this task with an incredibly energy-efficient, fast, and hyperparameter-free user-friendly approach. For more detailed information, you can check out .pdf) file.
{pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/RECORD
CHANGED
@@ -1,28 +1,28 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=a5zhtKQrVwHbhqgnBuAbKkLi4hC27iGD1Cf1jegr_eU,2704
 pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
 pyerualjetwork/help.py,sha256=Nyi0gHAN9ZnO4wgQLeENt0n7tSCZ3hJmjaJ853eGjCE,831
 pyerualjetwork/issue_solver.py,sha256=3pZTGotS29sy3pIuGQoJFUePibtSzS-tNoU80T_Usgk,3131
 pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
 pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
 pyerualjetwork/cpu/__init__.py,sha256=0yAYner_-v7SmT3P7JV2itU8xJUQdQpb40dhAMQiZkc,829
-pyerualjetwork/cpu/activation_functions.py,sha256=
-pyerualjetwork/cpu/data_ops.py,sha256=
-pyerualjetwork/cpu/ene.py,sha256=
+pyerualjetwork/cpu/activation_functions.py,sha256=zZSoOQ452Ykp_RsHVxklxesJmmFgufyIB4F3WQjudEQ,6689
+pyerualjetwork/cpu/data_ops.py,sha256=5biKr7pqLbJOayHYgGdQV1K5GqKbcOvrbbuAyByuDC8,16154
+pyerualjetwork/cpu/ene.py,sha256=1k2_qQydHyrp-3RlvsUu7cFdPa0pN3OcXI6jwIHiWg0,44484
 pyerualjetwork/cpu/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
 pyerualjetwork/cpu/metrics.py,sha256=WhZ8iEqWehaygPRADUlhA5j_Qv3UwqV_eMxpyRVkeVs,6070
-pyerualjetwork/cpu/model_ops.py,sha256=
-pyerualjetwork/cpu/nn.py,sha256=
+pyerualjetwork/cpu/model_ops.py,sha256=sWsP_7Gfa8_DJ2X7AUrOkeXnz2Eej6573grQQ3CooXM,20295
+pyerualjetwork/cpu/nn.py,sha256=TYLXmVLbbfFCDFA_cH9TSMxgjauDi6d7xfrPzOx6Xwg,31867
 pyerualjetwork/cpu/visualizations.py,sha256=rOQsc-W8b71z7ovXSoF49lx4fmpvlaHLsyj9ejWnhnI,28164
 pyerualjetwork/cuda/__init__.py,sha256=NbqvAS4jlMdoFdXa5_hi5ukXQ5zAZR_5BQ4QAqtiKug,879
-pyerualjetwork/cuda/activation_functions.py,sha256=
+pyerualjetwork/cuda/activation_functions.py,sha256=FmoSAxDr9SGO4nkE6ZflXK4pmvZ0sL3Epe1Lz-3GOVI,6766
 pyerualjetwork/cuda/data_ops.py,sha256=SiNodFNmWyTPY_KnKuAi9biPRdpTAYY3XM01bRSUPCs,18510
-pyerualjetwork/cuda/ene.py,sha256=
+pyerualjetwork/cuda/ene.py,sha256=veJRW1KoBFoDEhDJWYr-Y35kQC-Qh6UwURtXeM16S0M,45009
 pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
 pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
-pyerualjetwork/cuda/model_ops.py,sha256=
-pyerualjetwork/cuda/nn.py,sha256=
+pyerualjetwork/cuda/model_ops.py,sha256=iQPuxmthKxP2GTFLHJppxoU64C6mEpkDW-DsfwFGiuY,21020
+pyerualjetwork/cuda/nn.py,sha256=7rbaIEcmssaFgcionWVRmKijlgFyftVjf-MMNaLO_28,33140
 pyerualjetwork/cuda/visualizations.py,sha256=9l5BhXqXoeopdhLvVGvjH1TKYZb9JdKOsSE2IYD02zs,28569
-pyerualjetwork-5.31a1.dist-info/METADATA,sha256=
-pyerualjetwork-5.31a1.dist-info/WHEEL,sha256=
-pyerualjetwork-5.31a1.dist-info/top_level.txt,sha256=
-pyerualjetwork-5.31a1.dist-info/RECORD,,
+pyerualjetwork-5.33.dist-info/METADATA,sha256=QHnz5cJtj77mPGeUo5ZQcvD1BpFJj5kZPkiCGdZxXv0,8020
+pyerualjetwork-5.33.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.33.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.33.dist-info/RECORD,,
{pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.31a1.dist-info → pyerualjetwork-5.33.dist-info}/top_level.txt
File without changes