pyerualjetwork 5.0.2b1__py3-none-any.whl → 5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +7 -3
- pyerualjetwork/activation_functions_cpu.py +3 -71
- pyerualjetwork/activation_functions_cuda.py +2 -74
- pyerualjetwork/data_operations_cpu.py +3 -5
- pyerualjetwork/ene_cuda.py +5 -5
- pyerualjetwork/issue_solver.py +2 -2
- pyerualjetwork/model_operations_cpu.py +126 -54
- pyerualjetwork/model_operations_cuda.py +123 -49
- pyerualjetwork/neu_cpu.py +169 -52
- pyerualjetwork/neu_cuda.py +170 -55
- {pyerualjetwork-5.0.2b1.dist-info → pyerualjetwork-5.1.dist-info}/METADATA +3 -3
- pyerualjetwork-5.1.dist-info/RECORD +26 -0
- pyerualjetwork-5.0.2b1.dist-info/RECORD +0 -26
- {pyerualjetwork-5.0.2b1.dist-info → pyerualjetwork-5.1.dist-info}/WHEEL +0 -0
- {pyerualjetwork-5.0.2b1.dist-info → pyerualjetwork-5.1.dist-info}/top_level.txt +0 -0
pyerualjetwork/neu_cuda.py
CHANGED
@@ -6,10 +6,28 @@ NEU (Neural Networks) on CUDA
 =============================
 This module hosts functions for training and evaluating artificial neural networks on CUDA GPU for labeled classification tasks (for now).

-Currently,
+Currently, 3 types of models can be trained:

 PLAN (Potentiation Learning Artificial Neural Network)
+ * Training Time for Small Projects: fast
+ * Training Time for Big Projects: fast
+ * Explainability: high
+ * Learning Capacity: medium (compared to single perceptrons)
+
 MLP (Multi-Layer Perceptron → Deep Learning) -- With non-bias
+ * Training Time for Small Projects: fast
+ * Training Time for Big Projects: slow
+ * Explainability: low
+ * Learning Capacity: high
+
+PTNN (Potentiation Transfer Neural Network) -- With non-bias
+ * Training Time for Small Projects: fast
+ * Training Time for Big Projects: fast
+ * Explainability: low
+ * Learning Capacity: high
+
+Read learn function docstring for know how to use of these model architectures.
+

 For more information about PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf

@@ -33,6 +51,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 import cupy as cp
 import numpy as np
 import copy
+import random

 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
@@ -92,23 +111,26 @@ def plan_fit(

     return normalization(weight, dtype=dtype)

-def learn(x_train, y_train, optimizer, fit_start=True,
+def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size=1,
           weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
           neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
           interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
-          start_this_act=None, start_this_W=None, neurons=[],
+          start_this_act=None, start_this_W=None, neurons=[], activation_functions=[], dtype=cp.float32, memory='gpu'):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
-    the most accurate combination of activation potentiation(or activation function) & weight values for the given dataset.
+    the most accurate combination of activation potentiation(or activation function) & weight values for the given labeled classificaiton dataset.

     Why genetic optimization ENE(Eugenic NeuroEvolution) and not backpropagation?
     Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
     Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: ENE.

+    * This function also able to train classic MLP model architectures.
+    * And my newest innovative architecture: PTNN (Potentiation Transfer Neural Network).
+
     :Args:
     :param x_train: (array-like): Training input data.
     :param y_train: (array-like): Labels for training data.
-    :param optimizer: (function): Optimization technique with hyperparameters. (PLAN
+    :param optimizer: (function): Optimization technique with hyperparameters. (PLAN, MLP & PTNN (all) using ENE for optimization. Gradient based technique's will added in the future.) Please use this: from pyerualjetwork.ene_cuda import evolver (and) optimizer = lambda *args, **kwargs: evolver(*args, 'here give your hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python

             optimizer = lambda *args, **kwargs: ene_cuda.evolver(*args,
@@ -126,10 +148,10 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
                                                  batch_size=0.05,
                                                  interval=16.67)
     ```
-    :param fit_start: (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
-    :param gen: (int
+    :param fit_start: (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Additonaly if you want to train PTNN model you must be give True. Options: True or False. Default: True
+    :param gen: (int or list): The generation count for genetic optimization. If you want to train PTNN model you must give a list of two number. First number for PLAN model training second number for MLP.
     :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents %8 of the train set. Default is 1. (%100 of train)
-    :param pop_size: (int
+    :param pop_size: (int): Population size of each generation.
     :param weight_evolve: (bool, optional): Activation combinations already optimizes by PLANEAT genetic search algorithm. Should the weight parameters also evolve or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
     :param neural_web_history: (bool, optional): Draws history of neural web. Default is False. [ONLY FOR PLAN MODELS]
     :param show_current_activations: (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
@@ -145,8 +167,8 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
     :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
     :param start_this_W: (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
-    :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP. Number of elements --> Layer count. Default: [] (No hidden layer) --> architecture setted to PLAN, if not --> architecture setted to MLP.
-    :param
+    :param neurons: (list[int], optional): If you dont want train PLAN model this parameter represents neuron count of each hidden layer for MLP or PTNN. Number of elements --> Layer count. Default: [] (No hidden layer) --> architecture setted to PLAN, if not --> architecture setted to MLP.
+    :param activation_functions: (list[str], optional): If you dont want train PLAN model this parameter represents activation function of each hidden layer for MLP or PTNN. if neurons is not [] --> uses default: ['linear'] * len(neurons). if neurons is [] --> uses [].
     :param dtype: (cupy.dtype): Data type for the Weight matrices. np.float32 by default. Example: cp.float64 or cp.float16.
     :param memory: (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.

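Put together, the parameters documented above imply a call pattern like the following sketch for training a PTNN model with 5.1. This is an illustration rather than part of the diff: the dataset is synthetic placeholder data, the generation counts, population size and layer sizes are arbitrary, and activation_add_prob=0.85 is simply the value quoted in the docstring example.

```python
# Hypothetical usage sketch for the 5.1 learn() signature (placeholder data and values).
import cupy as cp
from pyerualjetwork import neu_cuda, ene_cuda

# Placeholder training set: 120 samples, 8 features, 3 one-hot classes.
x_train = cp.random.rand(120, 8, dtype=cp.float32)
labels = cp.random.randint(0, 3, size=120)
y_train = cp.eye(3, dtype=cp.float32)[labels]

# ENE drives the optimization for PLAN, MLP and PTNN; hyperparameters go through the lambda.
optimizer = lambda *args, **kwargs: ene_cuda.evolver(*args, activation_add_prob=0.85, **kwargs)

# gen as a two-element list selects PTNN training: gen[0] PLAN generations, then gen[1] MLP generations.
# neurons / activation_functions describe the hidden layers; fit_start=True is required for PTNN.
result = neu_cuda.learn(x_train, y_train, optimizer,
                        gen=[10, 20], pop_size=100,
                        fit_start=True, batch_size=0.05,
                        neurons=[32, 32], activation_functions=['linear', 'linear'],
                        dtype=cp.float32, memory='gpu')
# In 5.1 the returned tuple also carries the PTNN activation potentiation at the end
# (see the updated return statements later in this diff).
```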
@@ -162,13 +184,15 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     activations = [item for item in all_activations() if item not in except_this]
     activations_len = len(activations)

-    if pop_size
+    if pop_size > activations_len and fit_start is True:
+        for _ in range(pop_size - len(activations)):
+            random_index_all_act = random.randint(0, len(activations)-1)
+            activations.append(activations[random_index_all_act])
+
     y_train = optimize_labels(y_train, cuda=True)

     if pop_size < activations_len: raise ValueError(f"pop_size must be higher or equal to {activations_len}")

-    if gen is None: gen = activations_len
-
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=x_train.dtype)
         y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -188,12 +212,39 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
     if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

     if neurons != []:
-        fit_start = False
         weight_evolve = True
-
+
         if activation_functions == []: activation_functions = ['linear'] * len(neurons)
-
+
+        if fit_start is False:
+            # MLP
+            activations = activation_functions
+            model_type = 'MLP'
+            activation_potentiations = [0] * pop_size
+            activation_potentiation = None
+            is_mlp = True
+            transfer_learning = False
+        else:
+            # PTNN
+            model_type = 'PLAN' # First generation index gen[0] is PLAN, other index gen[1] it will change to PTNN (PLAN Connects to MLP and will transfer the learned information)
+            transfer_learning = True
+
+            neurons_copy = neurons.copy()
+            neurons = []
+            gen_copy = gen.copy()
+            gen = gen[0] + gen[1]
+            activation_potentiations = [0] * pop_size
+            activation_potentiation = None
+            is_mlp = False # it will change
+
     else:
+        # PLAN
+        model_type = 'PLAN'
+        transfer_learning = False
+
+        activation_potentiations = [0] * pop_size # NOTE: For PLAN models, activation_potentiations is needed BUT activations variable already mirros activation_potentiation values.
+        # So, we don't need to use activation_potentiations variable. activation_potentiations variable is only for PTNN models.
+        activation_potentiation = None
         is_mlp = False

     # Initialize visualization components
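In summary, with 5.1 the architecture is chosen from `neurons` and `fit_start` rather than from a separate flag. The branch logic in the hunk above reduces to the following illustrative restatement (not part of the diff or of the library):

```python
# Condensed, illustrative restatement of the 5.1 model-type selection (not library code).
def select_model_type(neurons: list, fit_start: bool) -> str:
    if not neurons:                          # no hidden layers requested -> PLAN
        return 'PLAN'
    return 'PTNN' if fit_start else 'MLP'    # hidden layers + PLAN warm start -> PTNN, otherwise MLP
```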
@@ -210,12 +261,12 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p

     progress = initialize_loading_bar(total=activations_len, desc="", ncols=79, bar_format=bar_format_learner)

-    if fit_start is False
-        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons,
+    if fit_start is False:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, activation_functions=activations, population_size=pop_size, dtype=dtype)

     else:
-        weight_pop = [0] *
-        act_pop = [0] *
+        weight_pop = [0] * len(activations)
+        act_pop = [0] * len(activations)

     if start_this_act is not None and start_this_W is not None:
         weight_pop[0] = start_this_W
@@ -223,6 +274,41 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p

     # LEARNING STARTED
     for i in range(gen):
+
+        # TRANSFORMATION PLAN TO MLP FOR PTNN (in later generations)
+        if model_type == 'PLAN' and transfer_learning:
+            if i == gen_copy[0]:
+
+                model_type = 'PTNN'
+                neurons = neurons_copy
+
+                for individual in range(len(weight_pop)):
+                    weight_pop[individual] = cp.copy(best_weight)
+                    activation_potentiations[individual] = final_activations.copy() if isinstance(final_activations, list) else final_activations
+
+                activation_potentiation = activation_potentiations[0]
+
+                neurons_copy = [len(y_train[0])] + neurons_copy
+                activation_functions = ['linear'] + activation_functions
+
+                weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons_copy, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)
+
+                # 0 indexed individual will keep PLAN's learned informations and in later generations it will share other individuals.
+                for l in range(1, len(weight_pop[0])):
+                    original_shape = weight_pop[0][l].shape
+
+                    identity_matrix = cp.eye(original_shape[0], original_shape[1], dtype=weight_pop[0][l].dtype)
+                    weight_pop[0][l] = identity_matrix
+
+                for l in range(len(weight_pop)):
+                    weight_pop[l][0] = cp.copy(best_weight)
+
+                best_weight = list(weight_pop[0])
+                final_activations = act_pop[0]
+                is_mlp = True
+                fit_start = False
+
+
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
         progress.set_postfix(postfix_dict)

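The identity-matrix initialization in the hunk above is what makes the hand-over lossless for individual 0: with the PLAN weights placed in layer 0, identity matrices in the remaining layers, and linear activations, the freshly built MLP initially reproduces the PLAN model's outputs. A small NumPy sketch of that idea, assuming square identity layers (mirroring the cp.eye call above):

```python
import numpy as np

# Toy shapes: 5 samples, 4 input features, 3 output classes.
x = np.random.rand(5, 4)
W_plan = np.random.rand(3, 4)          # PLAN weight matrix (outputs x inputs)

plan_out = x @ W_plan.T                # original PLAN forward pass

# PTNN hand-over: PLAN weights become layer 0, the extra layer starts as an identity matrix.
W0 = W_plan.copy()
W1 = np.eye(3)                         # counterpart of cp.eye(...) in the diff

ptnn_out = (x @ W0.T) @ W1.T           # with linear activations the output is unchanged
assert np.allclose(plan_out, ptnn_out)
```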
@@ -237,10 +323,11 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
             x_train_batch = cp.array(x_train_batch, dtype=x_train_batch.dtype, copy=False)
             y_train_batch = cp.array(y_train_batch, dtype=y_train.dtype)

-            if fit_start is True and i == 0
+            if fit_start is True and i == 0:
                 if start_this_act is not None and j == 0:
                     pass
                 else:
+
                     act_pop[j] = activations[j]
                     W = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
                     weight_pop[j] = W
@@ -248,7 +335,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
             if weight_evolve is False:
                 weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)

-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], auto_normalization=auto_normalization,
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, model_type=model_type)
             acc = model[get_acc()]

             if loss == 'categorical_crossentropy':
@@ -267,11 +354,11 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
                 best_fitness = fitness
                 best_acc = acc
                 best_loss = train_loss
-                best_weight = cp.copy(weight_pop[j]) if
+                best_weight = cp.copy(weight_pop[j]) if model_type == 'PLAN' else copy.deepcopy(weight_pop[j])
                 best_model = model

                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
-                if
+                if model_type == 'PLAN': final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 4)
@@ -306,27 +393,28 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
             if target_acc is not None and best_acc >= target_acc:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weight,
-                                        activations=final_activations, auto_normalization=auto_normalization,
+                                        activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                         y_pred_batch=train_model[get_preds_softmax()])
                 else:
                     train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])
-
                 print('\nActivations: ', final_activations)
-                print(
-                print(
+                print('Activation Potentiation: ', activation_potentiation)
+                print('Train Accuracy:', train_model[get_acc()])
+                print('Train Loss: ', train_loss, '\n')
+                print('Model Type:', model_type)

                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                                 best_loss, y_train, interval)
-                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
+                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, None, None, None, None, None, activation_potentiation

             # Check target loss
             if target_loss is not None and best_loss <= target_loss:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=best_weight,
-                                        activations=final_activations, auto_normalization=auto_normalization,
+                train_model = evaluate(x_train, y_train, W=best_weight,
+                                        activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)

                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -336,19 +424,22 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
                                                         y_pred_batch=train_model[get_preds_softmax()])

                 print('\nActivations: ', final_activations)
-                print(
-                print(
+                print('Activation Potentiation: ', activation_potentiation)
+                print('Train Accuracy:', train_model[get_acc()])
+                print('Train Loss: ', train_loss, '\n')
+                print('Model Type:', model_type)

                 # Display final visualizations
                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                                 train_loss, y_train, interval)
-                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
+                return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, None, None, None, None, None, activation_potentiation, None, None, None, None, None, None, None, activation_potentiation


         progress.update(1)

     if batch_size != 1:
-        train_model = evaluate(x_train, y_train, best_weight,
+        train_model = evaluate(x_train, y_train, W=best_weight,
+                               activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)

         if loss == 'categorical_crossentropy':
             train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -368,8 +459,8 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)

-        if
-
+        if model_type == 'PLAN': weight_pop = cp.array(weight_pop, copy=False, dtype=dtype)
+
         weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop), weight_evolve=weight_evolve, is_mlp=is_mlp, bar_status=False)
         target_pop = []

@@ -378,7 +469,7 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
         if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
             progress.close()
             train_model = evaluate(x_train, y_train, W=best_weight,
-
+                                   activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)

             if loss == 'categorical_crossentropy':
                 train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -388,18 +479,20 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
                                                     y_pred_batch=train_model[get_preds_softmax()])

             print('\nActivations: ', final_activations)
-            print(
-            print(
+            print('Activation Potentiation: ', activation_potentiation)
+            print('Train Accuracy:', train_model[get_acc()])
+            print('Train Loss: ', train_loss, '\n')
+            print('Model Type:', model_type)

             # Display final visualizations
             display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                             train_loss, y_train, interval)
-            return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
+            return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, None, None, None, None, None, activation_potentiation

     # Final evaluation
     progress.close()
-    train_model = evaluate(x_train, y_train, W=best_weight,
-
+    train_model = evaluate(x_train, y_train, W=best_weight,
+                           activations=final_activations, activation_potentiations=activation_potentiation, auto_normalization=auto_normalization, model_type=model_type)

     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -409,36 +502,41 @@ def learn(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, p
                                                     y_pred_batch=train_model[get_preds_softmax()])

     print('\nActivations: ', final_activations)
-    print(
-    print(
+    print('Activation Potentiation: ', activation_potentiation)
+    print('Train Accuracy:', train_model[get_acc()])
+    print('Train Loss: ', train_loss, '\n')
+    print('Model Type:', model_type)

     # Display final visualizations
     display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, train_loss, y_train, interval)
-    return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
+    return best_weight, best_model[get_preds_softmax()], best_acc, final_activations, None, None, None, None, None, None, None, activation_potentiation

 def evaluate(
     x_test,
     y_test,
+    model_type,
     W,
     activations=['linear'],
-
-
+    activation_potentiations=[],
+    auto_normalization=False
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.

     Args:
         x_test (cp.ndarray): Test data.
-
+
         y_test (cp.ndarray): Test labels (one-hot encoded).
-
+
+        model_type: (str): Type of the model. Options: 'PLAN', 'MLP', 'PTNN'.
+
         W (cp.ndarray): Neural net weight matrix.

-        activations (list): Activation list. Default = ['linear'].
+        activations (list, optional): Activation list for PLAN or MLP models (MLP layers activations if it PTNN model). Default = ['linear'].
+
+        activation_potentiations (list, optional): Extra activation potentiation list (PLAN layers activations) for PTNN models. Default = [].

         auto_normalization (bool, optional): Normalization for x_test ? Default = False.
-
-        is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)

     Returns:
         tuple: Model (list).
@@ -451,7 +549,7 @@ def evaluate(
     elif isinstance(activations, list):
         activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]

-    if
+    if model_type == 'MLP':
         layer = x_test
         for i in range(len(W)):
             if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
@@ -460,10 +558,27 @@ def evaluate(

         result = layer

-
+    if model_type == 'PLAN':

         x_test = apply_activation(x_test, activations)
         result = x_test @ W.T
+
+    if model_type == 'PTNN':
+
+        if isinstance(activation_potentiations, str):
+            activation_potentiations = [activation_potentiations]
+        elif isinstance(activation_potentiations, list):
+            activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
+
+        x_test = apply_activation(x_test, activation_potentiations)
+        layer = x_test @ W[0].T
+
+        for i in range(1, len(W)):
+            if i != len(W) - 1: layer = apply_activation(layer, activations[i])
+
+            layer = layer @ W[i].T
+
+        result = layer

     max_vals = cp.max(result, axis=1, keepdims=True)

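With the new signature, evaluate() expects the model type up front and, for PTNN models, the PLAN-layer activations in `activation_potentiations`. A hypothetical call under 5.1 might look like the sketch below; `x_test`, `y_test`, and `W` are placeholders prepared elsewhere (for example from a learn() run), and `get_acc`/`get_preds_softmax` are the same index helpers learn() uses above, assumed to be reachable through the neu_cuda module namespace.

```python
# Hypothetical evaluate() call under the 5.1 signature (x_test, y_test, W are placeholders).
from pyerualjetwork.neu_cuda import evaluate, get_acc, get_preds_softmax

model = evaluate(x_test, y_test, model_type='PTNN', W=W,
                 activations=['linear', 'linear'],       # MLP-layer activations
                 activation_potentiations=['linear'],    # PLAN-layer activations
                 auto_normalization=False)

acc = model[get_acc()]                    # same indexing learn() uses internally
preds = model[get_preds_softmax()]        # softmax predictions
```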
{pyerualjetwork-5.0.2b1.dist-info → pyerualjetwork-5.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.0.2b1
+Version: 5.1
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -63,8 +63,8 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs

 ABOUT PYERUALJETWORK:

-PyereualJetwork is a large
-It features PLAN, MLP
+PyereualJetwork is a large wide GPU-accelerated machine learning library in Python designed for professionals and researchers.
+It features PLAN, MLP Deep Learning and PTNN training, as well as ENE (Eugenic NeuroEvolution) for genetic optimization,
 which can also be applied to genetic algorithms or Reinforcement Learning (RL) problems.
 The library includes functions for data pre-processing, visualizations, model saving and loading, prediction and evaluation,
 training, and both detailed and simplified memory management. https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4862342. (THIS ARTICLE IS FIRST VERSION OF PLAN.) MODERN VERSION OF PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
pyerualjetwork-5.1.dist-info/RECORD
ADDED
@@ -0,0 +1,26 @@
+pyerualjetwork/__init__.py,sha256=IwEIh3pG8VOF-x3j5zczYZoFU0sAPskLx74yLtbhuik,2732
+pyerualjetwork/activation_functions_cpu.py,sha256=qP_Ipi2-c5tyJ7Jb9gWJZCj2AgeOIzLBdoEqQOUXD-s,5885
+pyerualjetwork/activation_functions_cuda.py,sha256=IJAqlbVdE01MrtOCX8TvxkGmqqhEcr1p9qozy0Sjpjw,5673
+pyerualjetwork/data_operations_cpu.py,sha256=HemqiYfSdlQKTTYNzpCh_9lTtS3AimMI4DvqJBAGjGw,16186
+pyerualjetwork/data_operations_cuda.py,sha256=5zgyJGPjQuHyx6IHNkRwMguYhm-GcI6Hal49WNvw-bM,18536
+pyerualjetwork/ene_cpu.py,sha256=2y5__d-vx7t5Ajs4IPuNnQe8ULR39Km_KQFNIUnalGA,45167
+pyerualjetwork/ene_cuda.py,sha256=dWavZydKL9b5BAGL430SuWs1TenMelbFtltoEAXKkJY,45673
+pyerualjetwork/fitness_functions.py,sha256=pUmAbUy5ex1Vpu6n_RRac_df-FR52gYIV8uxYS5H3tw,1252
+pyerualjetwork/help.py,sha256=FcX8mxo1_mvoqONVXY0Kn7S09CDkhi0jwNmn8g9mYZc,804
+pyerualjetwork/issue_solver.py,sha256=iY6hSsBxYI5l82RwnXQp2DrRUJyksk_7U9GUSnt2YfU,3117
+pyerualjetwork/loss_functions_cpu.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/memory_operations.py,sha256=g24d-cDuUFc0fOEtk3AJe-z_EBctYV5S4cY1rQ6VGiE,14279
+pyerualjetwork/metrics_cpu.py,sha256=vbfMwS0ay2heMSa0GNo-ydLjQ8cfexbLwaREp4FKAtY,6081
+pyerualjetwork/metrics_cuda.py,sha256=PWyJyexeqlPKb09LAcF55JvhZVeXLCu3P_siYq5m2gg,5065
+pyerualjetwork/model_operations_cpu.py,sha256=Y0uPkLVbdodP7lC-fOPdja3RWi2J9z2rwWIS2pxzotU,20523
+pyerualjetwork/model_operations_cuda.py,sha256=9KfaO3NwvA-bwZqshYQwfc5e-4AYPp53EwFHMvDHS3I,21537
+pyerualjetwork/neu_cpu.py,sha256=C9nNzMaan8-QyUK6gXTM_PQiSb4cBEpbM-WMLGbuDDY,31202
+pyerualjetwork/neu_cuda.py,sha256=nK74JgfyJDbaPmSR1CmtZc_PaSKk0FeFmNcWHe5-8U0,32354
+pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
+pyerualjetwork/visualizations_cpu.py,sha256=StyD1Hl1Gt55EMqR6tO3yVJZdPyGkOgCnQ75Zn8K6J8,28252
+pyerualjetwork/visualizations_cuda.py,sha256=7lYrkOdrjwQGB3T4k_vI8UDxsm_TRjzaSSg9GhlNczs,28667
+pyerualjetwork-5.1.dist-info/METADATA,sha256=aYILBPyOZMu50jTsmu22UVYh6DMToxX_XdhPF720RQw,8132
+pyerualjetwork-5.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-5.1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-5.1.dist-info/RECORD,,
pyerualjetwork-5.0.2b1.dist-info/RECORD
REMOVED
@@ -1,26 +0,0 @@
-pyerualjetwork/__init__.py,sha256=aoCNbl9z-1Ef506udIAw1B-oRxYyRyH8XA_NP-WFHY0,2672
-pyerualjetwork/activation_functions_cpu.py,sha256=X7Kv8qv8oZq8hvTdUiV-GkFjKHRlKIQypRPXh6gdkm4,7614
-pyerualjetwork/activation_functions_cuda.py,sha256=pefklsl9QuSVbKwiUUHeF_ExN0bICH7QIF1MfoMU40Q,7665
-pyerualjetwork/data_operations_cpu.py,sha256=dLczgxNx8W_GIgBLageegVhv-aczjNYfohB-PPpOU4Y,16401
-pyerualjetwork/data_operations_cuda.py,sha256=5zgyJGPjQuHyx6IHNkRwMguYhm-GcI6Hal49WNvw-bM,18536
-pyerualjetwork/ene_cpu.py,sha256=2y5__d-vx7t5Ajs4IPuNnQe8ULR39Km_KQFNIUnalGA,45167
-pyerualjetwork/ene_cuda.py,sha256=0tOIONUEDmvGZiCihzIHBrGVtRMPR1g23ZNfoI0i39s,45663
-pyerualjetwork/fitness_functions.py,sha256=pUmAbUy5ex1Vpu6n_RRac_df-FR52gYIV8uxYS5H3tw,1252
-pyerualjetwork/help.py,sha256=FcX8mxo1_mvoqONVXY0Kn7S09CDkhi0jwNmn8g9mYZc,804
-pyerualjetwork/issue_solver.py,sha256=_sMlNu6bS1sXZsJkD_eynLULApM8eWmPZJ0Jk1EKttg,3119
-pyerualjetwork/loss_functions_cpu.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
-pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/memory_operations.py,sha256=g24d-cDuUFc0fOEtk3AJe-z_EBctYV5S4cY1rQ6VGiE,14279
-pyerualjetwork/metrics_cpu.py,sha256=vbfMwS0ay2heMSa0GNo-ydLjQ8cfexbLwaREp4FKAtY,6081
-pyerualjetwork/metrics_cuda.py,sha256=PWyJyexeqlPKb09LAcF55JvhZVeXLCu3P_siYq5m2gg,5065
-pyerualjetwork/model_operations_cpu.py,sha256=BeUu_NW5YYWPKRmpm-BgdJZIKuX_KTCI_pVIJBtiuXQ,17115
-pyerualjetwork/model_operations_cuda.py,sha256=dLxN6ITHDaLR--Mau-xzAXJ8bO_7Jh2sST-NVD9hWtE,18282
-pyerualjetwork/neu_cpu.py,sha256=5K9oDNiTH6rVyOYo-2esw8ZLJ6M7-YLfiOyq_a9LBQU,25017
-pyerualjetwork/neu_cuda.py,sha256=VS9YVsYe0DLaqGDCrKyVnYXehu8eE7qZycdaUHCyyM0,26030
-pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
-pyerualjetwork/visualizations_cpu.py,sha256=StyD1Hl1Gt55EMqR6tO3yVJZdPyGkOgCnQ75Zn8K6J8,28252
-pyerualjetwork/visualizations_cuda.py,sha256=7lYrkOdrjwQGB3T4k_vI8UDxsm_TRjzaSSg9GhlNczs,28667
-pyerualjetwork-5.0.2b1.dist-info/METADATA,sha256=T0SlZ2k6mIO1XgIz9w5RwNdxrvIioVZnVsVYEcuHZyQ,8128
-pyerualjetwork-5.0.2b1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-5.0.2b1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-5.0.2b1.dist-info/RECORD,,
{pyerualjetwork-5.0.2b1.dist-info → pyerualjetwork-5.1.dist-info}/WHEEL
File without changes
{pyerualjetwork-5.0.2b1.dist-info → pyerualjetwork-5.1.dist-info}/top_level.txt
File without changes