pyerualjetwork 4.1.7__py3-none-any.whl → 4.1.8b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +2 -13
- pyerualjetwork/memory_operations.py +1 -1
- pyerualjetwork/plan.py +105 -108
- pyerualjetwork/plan_cuda.py +109 -113
- pyerualjetwork/planeat.py +26 -17
- pyerualjetwork/planeat_cuda.py +27 -17
- pyerualjetwork/visualizations.py +6 -4
- pyerualjetwork/visualizations_cuda.py +4 -1
- {pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/RECORD +12 -12
- {pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -48,7 +48,7 @@ for package_name in package_names:
 
     print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.7"
+__version__ = "4.1.8b0"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
@@ -58,15 +58,4 @@ def print_update_notes(__update__):
     print(f"Update Notes:\n{__update__}")
 
 print_version(__version__)
-print_update_notes(__update__)
-
-from .plan import *
-from .planeat import *
-from .activation_functions import *
-from .data_operations import *
-from .loss_functions import *
-from .metrics import *
-from .model_operations import *
-from .ui import *
-from .visualizations import *
-from .help import *
+print_update_notes(__update__)
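Since 4.1.8b0 drops the wildcard re-exports from `__init__.py`, importing the package root no longer brings `plan`, `planeat`, and the other submodules into scope. A minimal usage sketch of the explicit import style this implies; the bare `fit`/`evaluate` calls mirror how `learner` invokes them in the hunks below, but treat the exact default signatures as an assumption:

```python
import numpy as np
from pyerualjetwork import plan  # explicit: the package root no longer re-exports submodules

# toy one-hot dataset, shapes chosen only for illustration
x_train = np.random.rand(40, 8).astype(np.float32)
y_train = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 40)]

W = plan.fit(x_train, y_train)                 # returns the trained weight matrix
model = plan.evaluate(x_train, y_train, W=W)   # same call shape learner() uses internally
```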
pyerualjetwork/memory_operations.py
CHANGED
@@ -40,7 +40,7 @@ def transfer_to_cpu(x, dtype=np.float32):
 
     :return: NumPy array with the specified dtype
     """
-    from
+    from ui import loading_bars, initialize_loading_bar
     try:
         if isinstance(x, np.ndarray):
             return x.astype(dtype) if x.dtype != dtype else x
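For context, `transfer_to_cpu` defers its UI import to call time, a common way to sidestep circular imports at package load. A minimal sketch of the pattern; note that inside the installed package the relative form `from .ui import ...` is the spelling that resolves, whereas the absolute `from ui import ...` in the hunk assumes the module directory is on `sys.path`:

```python
import numpy as np

def transfer_to_cpu(x, dtype=np.float32):
    """Sketch: return x as a NumPy array of the requested dtype."""
    # Deferred import: executed only when the function runs, not at module
    # import time, so memory_operations and ui can depend on each other.
    from .ui import loading_bars, initialize_loading_bar  # noqa: F401
    if isinstance(x, np.ndarray):
        return x.astype(dtype) if x.dtype != dtype else x
    return np.asarray(x, dtype=dtype)  # assumption: other inputs are array-like
```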
pyerualjetwork/plan.py
CHANGED
@@ -3,8 +3,8 @@
 
 MAIN MODULE FOR PLAN
 
-PLAN document: https://github.com/HCB06/
-
+PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
 @author: Hasan Can Beydili
 @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -173,45 +173,61 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', batch_size=1,
+def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
-            neurons_history=False,
-            early_stop=False, loss='categorical_crossentropy', show_history=False,
+            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None,
+            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
-    the most accurate combination of activation potentiation for the given dataset.
+    the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
 
+    Why genetic optimization and not backpropagation?
+    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
+    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+
     Args:
 
        x_train (array-like): Training input data.
 
       y_train (array-like): Labels for training data. one-hot encoded.
 
+       optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizator = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+       ```python
+       genetic_optimizator = lambda *args, **kwargs: planeat.evolve(*args,
+                                                                    activation_add_prob=0.85,
+                                                                    mutations=False,
+                                                                    strategy='cross_over',
+                                                                    **kwargs)
+
+       model = plan.learner(x_train,
+                            y_train,
+                            optimizator=genetic_optimizator,
+                            strategy='accuracy',
+                            show_history=True,
+                            target_acc=0.94,
+                            interval=16.67)
+       ```
+
       x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
 
       y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
 
-      strategy (str, optional): Learning strategy. (options: 'accuracy', '
-
-      patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
+      strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
-
+      gen (int, optional): The generation count for genetic optimization.
 
       batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
 
       auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
-
-
-      early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two depth stops learning.) Default is False.
+      early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
 
       show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
       show_history (bool, optional): If True, displays the training history after optimization. Default is False.
 
-      loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training
+      loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
       interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
 
@@ -223,18 +239,23 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
       only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
-
+      start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+
+      start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
 
       neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
 
       neural_web_history (bool, optional): Draws history of neural web. Default is False.
 
+      target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
+
      dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
    Returns:
       tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
 
    """
+
    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
    activation_potentiation = all_activations()
@@ -273,30 +294,13 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
    else:
        data = 'Test'
 
-    if early_shifting != False:
-        shift_patience = early_shifting
-
-    # Strategy initialization
-    if strategy == 'adaptive_accuracy':
-        strategy = 'accuracy'
-        adaptive = True
-        if patience == None:
-            patience = 5
-    elif strategy == 'adaptive_loss':
-        strategy = 'loss'
-        adaptive = True
-        if patience == None:
-            patience = 0.150
-    else:
-        adaptive = False
-
    # Filter activation functions
    if only_this != None:
        activation_potentiation = only_this
    if except_this != None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-    if
-
+    if gen is None:
+        gen = len(activation_potentiation)
 
    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
@@ -306,19 +310,16 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        ncols = 76
    else:
        ncols = 89
-    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
    # Initialize variables
-
-    if
-    best_activations = []
+    act_pop = []
+    if start_this_act is None and start_this_W is None:
        best_acc = 0
    else:
-
+        act_pop.append(start_this_act)
        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
 
-
-        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)
+        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
 
        if loss == 'categorical_crossentropy':
            test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
@@ -328,93 +329,101 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        best_acc = model[get_acc()]
        best_loss = test_loss
 
-
+    best_acc_per_gen_list = []
    postfix_dict = {}
    loss_list = []
-
-
-
+    act_pop = []
+    weight_pop = []
+    target_pop = []
+
+    for i in range(len(activation_potentiation)):
+        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
+
+        if i == 0 and start_this_act is not None:
+            act_pop[0] = activation_potentiation[i]
+            weight_pop[0] = W
+        else:
+            act_pop.append(activation_potentiation[i])
+            weight_pop.append(W)
+
+        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype)
+
+        if strategy == 'accuracy': target_pop.append(model[get_acc()])
+        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+
+            if strategy == 'precision': target_pop.append(precision_score)
+            if strategy == 'recall': target_pop.append(recall_score)
+            if strategy == 'f1': target_pop.append(f1_score)
+
+    weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, 0, np.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
+    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    for i in range(gen):
+        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
        progress.set_postfix(postfix_dict)
 
        progress.n = 0
        progress.last_print_n = 0
        progress.update(0)
+
+        weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
 
        for j in range(len(activation_potentiation)):
-            for k in range(len(best_activations)):
-                activations.append(best_activations[k])
-
-            activations.append(activation_potentiation[j])
 
            x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            W = fit(x_train, y_train, activation_potentiation=activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-            model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)
+            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
 
            acc = model[get_acc()]
+            if strategy == 'accuracy': target_pop.append(acc)
 
-
-            if loss == 'categorical_crossentropy':
-                test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-            else:
-                test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-            if i == 0 and j == 0 and start_this is None:
-                best_loss = test_loss
-
-            if strategy == 'f1' or strategy == 'precision' or strategy == 'recall' or strategy == 'all':
+            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
                precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
 
-                if strategy == 'precision'
+                if strategy == 'precision':
+                    target_pop.append(precision_score)
+
                    if i == 0 and j == 0:
                        best_precision = precision_score
 
-                if strategy == 'recall'
+                if strategy == 'recall':
+                    target_pop.append(recall_score)
+
                    if i == 0 and j == 0:
                        best_recall = recall_score
 
-                if strategy == 'f1'
+                if strategy == 'f1':
+                    target_pop.append(f1_score)
+
                    if i == 0 and j == 0:
                        best_f1 = f1_score
 
-            if early_shifting != False:
-                if acc <= best_acc:
-                    early_shifting -= 1
-                    if early_shifting == 0:
-                        early_shifting = shift_patience
-                        break
-
            if ((strategy == 'accuracy' and acc >= best_acc) or
-                (strategy == 'loss' and test_loss <= best_loss) or
                (strategy == 'f1' and f1_score >= best_f1) or
                (strategy == 'precision' and precision_score >= best_precision) or
-                (strategy == 'recall' and recall_score >= best_recall)
-                (strategy == 'all' and f1_score >= best_f1 and acc >= best_acc and
-                 test_loss <= best_loss and precision_score >= best_precision and
-                 recall_score >= best_recall)):
+                (strategy == 'recall' and recall_score >= best_recall)):
 
-                current_best_activation = activation_potentiation[j]
                best_acc = acc
-
+                best_weights = weight_pop[j]
+                final_activations = act_pop[j]
+                best_model = model
+
                if batch_size == 1:
                    postfix_dict[f"{data} Accuracy"] = best_acc
                else:
                    postfix_dict[f"{data} Batch Accuracy"] = acc
                progress.set_postfix(postfix_dict)
-
-                final_activations = activations
 
                if show_current_activations:
                    print(f", Current Activations={final_activations}", end='')
 
-
-
-
-
-                if loss == 'categorical_crossentropy':
-                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-                else:
-                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                if loss == 'categorical_crossentropy':
+                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                else:
+                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
 
                if batch_size == 1:
                    postfix_dict[f"{data} Loss"] = test_loss
@@ -426,9 +435,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
        # Update visualizations during training
        if show_history:
-
-            update_history_plots_for_learner(viz_objects,
-
+            gen_list = range(1, len(best_acc_per_gen_list) + 2)
+            update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                                             best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
        if neurons_history:
            viz_objects['neurons']['artists'] = (
@@ -496,25 +505,13 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
            return best_weights, best_model[get_preds()], best_acc, final_activations
 
        progress.update(1)
-        activations = []
 
-
-        best_acc_per_depth_list.append(best_acc)
+        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)
 
-        # Check adaptive strategy conditions
-        if adaptive == True and strategy == 'accuracy' and i + 1 >= patience:
-            check = best_acc_per_depth_list[-patience:]
-            if all(x == check[0] for x in check):
-                strategy = 'loss'
-                adaptive = False
-        elif adaptive == True and strategy == 'loss' and best_loss <= patience:
-            strategy = 'accuracy'
-            adaptive = False
-
        # Early stopping check
        if early_stop == True and i > 0:
-            if
+            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                progress.close()
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations, dtype=dtype)
@@ -548,7 +545,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
            else:
                train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
-            print('\nActivations: ',
+            print('\nActivations: ', act_pop[-1])
            print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
            print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
            if data == 'Test':
pyerualjetwork/plan_cuda.py
CHANGED
@@ -1,10 +1,10 @@
 # -*- coding: utf-8 -*-
 """
 
-MAIN MODULE FOR
+MAIN MODULE FOR PLAN_CUDA
 
-PLAN document: https://github.com/HCB06/
-
+PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
 @author: Hasan Can Beydili
 @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -24,6 +24,7 @@ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
 from .activation_functions_cuda import apply_activation, Softmax, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
+from .memory_operations import transfer_to_gpu, transfer_to_cpu
 from .visualizations_cuda import (
    draw_neural_web,
    update_neural_web_for_fit,
@@ -188,45 +189,63 @@ def fit(
    return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', batch_size=1,
+def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
            neural_web_history=False, show_current_activations=False, auto_normalization=True,
-            neurons_history=False,
-            early_stop=False, loss='categorical_crossentropy', show_history=False,
+            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
            interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None,
+            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=cp.float32, memory='gpu'):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset.
 
+    Why genetic optimization and not backpropagation?
+    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
+    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+
    Args:
 
       x_train (array-like): Training input data.
 
       y_train (array-like): Labels for training data.
 
+       optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+       ```python
+       genetic_optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+                                                                         activation_add_prob=0.85,
+                                                                         mutations=False,
+                                                                         strategy='cross_over',
+                                                                         **kwargs)
+
+       model = plan_cuda.learner(x_train,
+                                 y_train,
+                                 optimizator=genetic_optimizator,
+                                 strategy='accuracy',
+                                 show_history=True,
+                                 target_acc=0.94,
+                                 interval=16.67)
+       ```
+
      x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
 
      y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
 
-      strategy (str, optional): Learning strategy. (options: 'accuracy', '
+      strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
      patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
 
-
+      gen (int, optional): The generation count for genetic optimization.
 
      batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
 
      auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
-
-
-      early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two depth stops learning.) Default is False.
+      early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
 
      show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
      show_history (bool, optional): If True, displays the training history after optimization. Default is False.
 
-      loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training
+      loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
      interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
 
@@ -238,8 +257,10 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
      only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
-
+      start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
 
+      start_this_W (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
+
      neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
 
      neural_web_history (bool, optional): Draws history of neural web. Default is False.
@@ -252,7 +273,6 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
      tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
 
    """
-    from memory_operations import transfer_to_gpu, transfer_to_cpu
 
    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
@@ -272,7 +292,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        x_test = transfer_to_gpu(x_test, dtype=dtype)
        y_test = transfer_to_gpu(y_test, dtype=y_train.dtype)
 
-        from data_operations_cuda import batcher
+        from .data_operations_cuda import batcher
 
    elif memory == 'cpu':
        x_train = transfer_to_cpu(x_train, dtype=dtype)
@@ -281,57 +301,37 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        x_test = transfer_to_cpu(x_test, dtype=dtype)
        y_test = transfer_to_cpu(y_test, dtype=y_train.dtype)
 
-        from data_operations import batcher
+        from .data_operations import batcher
 
    else:
        raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
-
-    if early_shifting != False:
-        shift_patience = early_shifting
-
-    # Strategy initialization
-    if strategy == 'adaptive_accuracy':
-        strategy = 'accuracy'
-        adaptive = True
-        if patience == None:
-            patience = 5
-    elif strategy == 'adaptive_loss':
-        strategy = 'loss'
-        adaptive = True
-        if patience == None:
-            patience = 0.150
-    else:
-        adaptive = False
-
    # Filter activation functions
    if only_this != None:
        activation_potentiation = only_this
    if except_this != None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-    if
-
+    if gen is None:
+        gen = len(activation_potentiation)
 
    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
    # Initialize progress bar
    if batch_size == 1:
-        ncols =
+        ncols = 76
    else:
-        ncols =
-    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+        ncols = 89
 
    # Initialize variables
-
-    if
-    best_activations = []
+    act_pop = []
+    if start_this_act is None and start_this_W is None:
        best_acc = 0
    else:
-
+        act_pop.append(start_this_act)
        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-        model = evaluate(x_test_batch, y_test_batch, W=
+
+        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
 
        if loss == 'categorical_crossentropy':
            test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
@@ -341,12 +341,42 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        best_acc = model[get_acc()]
        best_loss = test_loss
 
-
+    best_acc_per_gen_list = []
    postfix_dict = {}
    loss_list = []
-
-
-
+    act_pop = []
+    weight_pop = []
+    target_pop = []
+
+    for i in range(len(activation_potentiation)):
+        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
+
+        if i == 0 and start_this_act is not None:
+            act_pop[0] = activation_potentiation[i]
+            weight_pop[0] = W
+        else:
+            act_pop.append(activation_potentiation[i])
+            weight_pop.append(W)
+
+        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype, memory=memory)
+        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype, memory=memory)
+
+        if strategy == 'accuracy': target_pop.append(model[get_acc()])
+        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+
+            if strategy == 'precision': target_pop.append(precision_score)
+            if strategy == 'recall': target_pop.append(recall_score)
+            if strategy == 'f1': target_pop.append(f1_score)
+
+
+    weight_pop, act_pop = optimizator(cp.array(weight_pop, dtype=dtype), act_pop, 0, cp.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
+    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    for i in range(gen):
+        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
        progress.set_postfix(postfix_dict)
 
        progress.n = 0
@@ -354,79 +384,57 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        progress.update(0)
 
        for j in range(len(activation_potentiation)):
-            for k in range(len(best_activations)):
-                activations.append(best_activations[k])
-
-            activations.append(activation_potentiation[j])
 
            x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype, memory=memory)
+            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
 
            acc = model[get_acc()]
+            if strategy == 'accuracy': target_pop.append(acc)
 
-
-
-                test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-            else:
-                test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+                precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
 
-            if
-
+                if strategy == 'precision':
+                    target_pop.append(precision_score)
 
-            if strategy == 'f1' or strategy == 'precision' or strategy == 'recall' or strategy == 'all':
-                precision_score, recall_score, f1_score = metrics(transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), model[get_preds()])
-
-                if strategy == 'precision' or strategy == 'all':
                    if i == 0 and j == 0:
                        best_precision = precision_score
 
-                if strategy == 'recall'
+                if strategy == 'recall':
+                    target_pop.append(recall_score)
+
                    if i == 0 and j == 0:
                        best_recall = recall_score
 
-                if strategy == 'f1'
+                if strategy == 'f1':
+                    target_pop.append(f1_score)
+
                    if i == 0 and j == 0:
                        best_f1 = f1_score
 
-            if early_shifting != False:
-                if acc <= best_acc:
-                    early_shifting -= 1
-                    if early_shifting == 0:
-                        early_shifting = shift_patience
-                        break
-
            if ((strategy == 'accuracy' and acc >= best_acc) or
-                (strategy == 'loss' and test_loss <= best_loss) or
                (strategy == 'f1' and f1_score >= best_f1) or
                (strategy == 'precision' and precision_score >= best_precision) or
-                (strategy == 'recall' and recall_score >= best_recall)
-                (strategy == 'all' and f1_score >= best_f1 and acc >= best_acc and
-                 test_loss <= best_loss and precision_score >= best_precision and
-                 recall_score >= best_recall)):
+                (strategy == 'recall' and recall_score >= best_recall)):
 
-                current_best_activation = activation_potentiation[j]
                best_acc = acc
-
+                best_weights = weight_pop[j]
+                final_activations = act_pop[j]
+                best_model = model
+
                if batch_size == 1:
                    postfix_dict[f"{data} Accuracy"] = best_acc
                else:
                    postfix_dict[f"{data} Batch Accuracy"] = acc
                progress.set_postfix(postfix_dict)
-
-                final_activations = activations
 
                if show_current_activations:
                    print(f", Current Activations={final_activations}", end='')
-
-
-
-
-
-                if loss == 'categorical_crossentropy':
-                    test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-                else:
-                    test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+
+                if loss == 'categorical_crossentropy':
+                    test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+                else:
+                    test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
 
                if batch_size == 1:
                    postfix_dict[f"{data} Loss"] = test_loss
@@ -438,9 +446,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
        # Update visualizations during training
        if show_history:
-
-            update_history_plots_for_learner(viz_objects,
-
+            gen_list = range(1, len(best_acc_per_gen_list) + 2)
+            update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                                             best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
        if neurons_history:
            viz_objects['neurons']['artists'] = (
@@ -508,28 +516,16 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
            return best_weights, best_model[get_preds()], best_acc, final_activations
 
        progress.update(1)
-        activations = []
 
-
-        best_acc_per_depth_list.append(best_acc)
+        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)
 
-        # Check adaptive strategy conditions
-        if adaptive == True and strategy == 'accuracy' and i + 1 >= patience:
-            check = best_acc_per_depth_list[-patience:]
-            if all(x == check[0] for x in check):
-                strategy = 'loss'
-                adaptive = False
-        elif adaptive == True and strategy == 'loss' and best_loss <= patience:
-            strategy = 'accuracy'
-            adaptive = False
-
        # Early stopping check
        if early_stop == True and i > 0:
-            if
+            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                progress.close()
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                       activation_potentiation=final_activations, dtype=dtype)
+                                       activation_potentiation=final_activations, dtype=dtype, memory=memory)
 
                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -553,7 +549,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
    # Final evaluation
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                           activation_potentiation=final_activations, dtype=dtype)
+                           activation_potentiation=final_activations, dtype=dtype, memory=memory)
 
    if loss == 'categorical_crossentropy':
        train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
pyerualjetwork/planeat.py
CHANGED
@@ -17,7 +17,7 @@ from tqdm import tqdm
 ### LIBRARY IMPORTS ###
 from .plan import feed_forward
 from .data_operations import normalization
-from .ui import loading_bars
+from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
 
 def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
@@ -69,7 +69,7 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
    return np.array(population_weights, dtype=dtype), population_activations
 
 
-def evolve(weights, activation_potentiations, what_gen,
+def evolve(weights, activation_potentiations, what_gen, fitness, show_info=False, strategy='cross_over', bar_status=True, policy='normal_selective', target_fitness='max', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=np.float32):
    """
    Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
    The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
@@ -83,8 +83,8 @@ Args:
 
    what_gen (int): The current generation number, used for informational purposes or logging.
 
-
-        The array is used to rank the genomes based on their performance. PLANEAT maximizes
+    fitness (numpy.ndarray): A 1D array containing the fitness values of each genome.
+        The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness for looking 'target' hyperparameter.
 
    show_info (bool, optional): If True, prints information about the current generation and the
        maximum reward obtained. Also shows the current configuration. Default is False.
@@ -96,12 +96,16 @@ Args:
        (PLAN feature, similar to arithmetic crossover but different.)
        Default is 'cross_over'.
 
+    bar_status (bool, optional): Loading bar status during evolving process of genomes. True or False. Default: True
+
    policy (str, optional): The selection policy that governs how genomes are selected for reproduction. Options:
        - 'normal_selective': Normal selection based on reward, where a portion of the bad genes are discarded.
        - 'more_selective': A more selective policy, where fewer bad genes survive.
        - 'less_selective': A less selective policy, where more bad genes survive.
        Default is 'normal_selective'.
 
+    target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
+
    mutations (bool, optional): If True, mutations are applied to the bad genomes and potentially
        to the best genomes as well. Default is True.
 
@@ -163,7 +167,7 @@ Returns:
 
 Notes:
    - **Selection Process**:
-        - The genomes are sorted by their fitness (based on `
+        - The genomes are sorted by their fitness (based on `fitness`), and then split into "best" and "bad" halves.
        - The best genomes are retained, and the bad genomes are modified based on the selected strategy.
 
    - **Crossover and Potentiation Strategies**:
@@ -175,13 +179,13 @@ Notes:
    - `bad_genoms_mutation_prob` determines the probability of applying mutations to the bad genomes.
    - If `activation_mutate_prob` is provided, activation function mutations are applied to the genomes based on this probability.
 
-    - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `
+    - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `fitness` has an odd length, an error is raised.
 
    - **Logging**: If `show_info=True`, the current generation and the maximum reward from the population are printed for tracking the learning progress.
 
 Example:
    ```python
-    weights, activation_potentiations = learner(weights, activation_potentiations, 1,
+    weights, activation_potentiations = learner(weights, activation_potentiations, 1, fitness, info=True, strategy='cross_over', policy='normal_selective')
    ```
 
    - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
@@ -219,17 +223,19 @@ Example:
    if not isinstance(activation_mutate_prob, float) or activation_mutate_prob < 0 or activation_mutate_prob > 1:
        raise ValueError("activation_mutate_prob parameter must be float and 0-1 range")
 
-    if len(
-        slice_center = int(len(
+    if len(fitness) % 2 == 0:
+        slice_center = int(len(fitness) / 2)
 
    else:
        raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")
 
-
+
+    ### FITNESS IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
-
+    if target_fitness == 'max': sort_indices = np.argsort(fitness)
+    elif target_fitness == 'min': sort_indices = np.argsort(-fitness)
 
-
+    fitness = fitness[sort_indices]
    weights = weights[sort_indices]
 
    activation_potentiations = [activation_potentiations[i] for i in sort_indices]
@@ -249,7 +255,9 @@ Example:
 
    bar_format = loading_bars()[0]
 
-
+    if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50, ascii="▱▰")
+
+    for i in range(len(bad_weights)):
 
        if policy == 'normal_selective':
 
@@ -313,7 +321,8 @@ Example:
 
        elif mutation_prob < bad_genoms_mutation_prob:
            bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)
-
+
+        if bar_status: progress.update(1)
 
    weights = np.vstack((bad_weights, best_weights))
    activation_potentiations = bad_activations + best_activations
@@ -344,9 +353,9 @@ Example:
    print(" ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
    print("*** Performance ***")
-    print(" MAX
-    print(" MEAN
-    print(" MIN
+    print(" MAX FITNESS: ", str(round(max(fitness), 2)))
+    print(" MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
+    print(" MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')
 
    print(" BEST GENOME INDEX: ", str(len(weights)-1))
    print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -13,12 +13,12 @@ ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/
|
|
13
13
|
import cupy as cp
|
14
14
|
import numpy as np
|
15
15
|
import random
|
16
|
-
|
16
|
+
|
17
17
|
|
18
18
|
### LIBRARY IMPORTS ###
|
19
19
|
from .plan_cuda import feed_forward
|
20
20
|
from .data_operations_cuda import normalization
|
21
|
-
from .ui import loading_bars
|
21
|
+
from .ui import loading_bars, initialize_loading_bar
|
22
22
|
from .activation_functions_cuda import apply_activation, all_activations
|
23
23
|
|
24
24
|
def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32):
|
@@ -70,13 +70,13 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
|
|
70
70
|
return cp.array(population_weights, dtype=dtype), population_activations
|
71
71
|
|
72
72
|
|
73
|
-
def evolve(weights, activation_potentiations, what_gen,
|
73
|
+
def evolve(weights, activation_potentiations, what_gen, fitness, show_info=False, strategy='cross_over', bar_status=True, policy='normal_selective', target_fitness='max', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=cp.float32):
|
74
74
|
"""
|
75
75
|
Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
|
76
76
|
The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
|
77
77
|
|
78
78
|
Args:
|
79
|
-
weights (
|
79
|
+
weights (cupy.ndarray): Array of weights for each genome.
|
80
80
|
(first returned value of define_genomes function)
|
81
81
|
|
82
82
|
activation_potentiations (list): A list of activation functions for each genome.
|
@@ -84,7 +84,7 @@ Args:
|
|
84
84
|
|
85
85
|
what_gen (int): The current generation number, used for informational purposes or logging.
|
86
86
|
|
87
|
-
|
87
|
+
fitness (cupy.ndarray): A 1D array containing the fitness or reward values of each genome.
|
88
88
|
The array is used to rank the genomes based on their performance. PLANEAT maximizes the reward.
|
89
89
|
|
90
90
|
show_info (bool, optional): If True, prints information about the current generation and the
|
@@ -97,12 +97,16 @@ Args:
|
|
97
97
|
(PLAN feature, similar to arithmetic crossover but different.)
|
98
98
|
Default is 'cross_over'.
|
99
99
|
|
100
|
+
bar_status (bool, optional): Loading bar status during evolving process of genomes. True or False. Default: True
|
101
|
+
|
100
102
|
policy (str, optional): The selection policy that governs how genomes are selected for reproduction. Options:
|
101
103
|
- 'normal_selective': Normal selection based on reward, where a portion of the bad genes are discarded.
|
102
104
|
- 'more_selective': A more selective policy, where fewer bad genes survive.
|
103
105
|
- 'less_selective': A less selective policy, where more bad genes survive.
|
104
106
|
Default is 'normal_selective'.
|
105
107
|
|
108
|
+
target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
|
109
|
+
|
106
110
|
mutations (bool, optional): If True, mutations are applied to the bad genomes and potentially
|
107
111
|
to the best genomes as well. Default is True.
|
108
112
|
|
@@ -164,7 +168,7 @@ Returns:
|
|
164
168
|
|
165
169
|
Notes:
|
166
170
|
- **Selection Process**:
|
167
|
-
- The genomes are sorted by their fitness (based on `
|
171
|
+
- The genomes are sorted by their fitness (based on `fitness`), and then split into "best" and "bad" halves.
|
168
172
|
- The best genomes are retained, and the bad genomes are modified based on the selected strategy.
|
169
173
|
|
170
174
|
- **Crossover and Potentiation Strategies**:
|
@@ -176,13 +180,13 @@ Notes:
 - `bad_genoms_mutation_prob` determines the probability of applying mutations to the bad genomes.
 - If `activation_mutate_prob` is provided, activation function mutations are applied to the genomes based on this probability.

-- **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `
+- **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `fitness` has an odd length, an error is raised.

 - **Logging**: If `show_info=True`, the current generation and the maximum reward from the population are printed for tracking the learning progress.

 Example:
 ```python
-weights, activation_potentiations = learner(weights, activation_potentiations, 1,
+weights, activation_potentiations = learner(weights, activation_potentiations, 1, fitness, info=True, strategy='cross_over', policy='normal_selective')
 ```

 - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
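Note that the docstring's Example block still invokes `learner`, although the function defined in this hunk is `evolve`. A minimal usage sketch against the new signature (keyword values are illustrative, not recommendations):

```python
import cupy as cp
from pyerualjetwork.planeat_cuda import evolve

def run_generation(weights, activation_potentiations, fitness, gen):
    """One evolution step; `weights` and `activation_potentiations` come from
    define_genomes(), and `fitness` from your own per-genome evaluation loop
    (see the fitness sketch earlier in this diff)."""
    return evolve(
        weights, activation_potentiations, gen, fitness,
        show_info=True, strategy='cross_over', bar_status=True,
        policy='normal_selective', target_fitness='max',
    )
```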
@@ -220,17 +224,20 @@ Example:
 if not isinstance(activation_mutate_prob, float) or activation_mutate_prob < 0 or activation_mutate_prob > 1:
     raise ValueError("activation_mutate_prob parameter must be float and 0-1 range")

-if len(
-    slice_center = int(len(
+if len(fitness) % 2 == 0:
+    slice_center = int(len(fitness) / 2)

 else:
     raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

-sort_indices = cp.argsort(
+sort_indices = cp.argsort(fitness)

-###
+### FITNESS LIST IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

-
+if target_fitness == 'max': sort_indices = np.argsort(fitness)
+elif target_fitness == 'min': sort_indices = np.argsort(-fitness)
+
+fitness = fitness[sort_indices]
 weights = weights[sort_indices]

 activation_potentiations = [activation_potentiations[i] for i in sort_indices]
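Taken together, this hunk sorts the population by fitness and splits it at the midpoint. A minimal sketch of that selection step in isolation, pure cupy with stand-in data:

```python
import cupy as cp

fitness = cp.array([0.9, 0.1, 0.5, 0.7], dtype=cp.float32)
weights = cp.arange(8, dtype=cp.float32).reshape(4, 2)  # stand-in genomes

if len(fitness) % 2 != 0:
    raise ValueError("genome population size must be an even number")

slice_center = len(fitness) // 2
sort_indices = cp.argsort(fitness)       # ascending: worst genomes first

weights = weights[sort_indices]
bad_weights = weights[:slice_center]     # reworked via mutation / crossover
best_weights = weights[slice_center:]    # retained as-is
```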
@@ -250,7 +257,9 @@ Example:
 bar_format = loading_bars()[0]

-
+if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50, ascii="▱▰")
+
+for i in range(len(bad_weights)):

 if policy == 'normal_selective':
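The `desc`/`ncols`/`ascii` keywords match tqdm's API, so the package's ui helpers appear to wrap tqdm. A minimal sketch of the same flag-gated bar using tqdm directly (an assumption for illustration, not the package's own helper):

```python
from tqdm import tqdm

bar_status = True
bad_genomes = range(50)  # stand-in for the bad half of the population

progress = tqdm(total=len(bad_genomes), desc="GENERATION: 1", ncols=50,
                ascii="▱▰") if bar_status else None

for _ in bad_genomes:
    # ... mutate / cross over one genome here ...
    if bar_status:
        progress.update(1)
if bar_status:
    progress.close()
```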
@@ -315,6 +324,7 @@ Example:
 elif mutation_prob < bad_genoms_mutation_prob:
     bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)

+if bar_status: progress.update(1)

 weights = cp.vstack((bad_weights, best_weights))
 activation_potentiations = bad_activations + best_activations
@@ -345,9 +355,9 @@ Example:
 print(" ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')

 print("*** Performance ***")
-print(" MAX REWARD: ", str(round(max(
-print(" MEAN REWARD: ", str(round(cp.mean(
-print(" MIN REWARD: ", str(round(min(
+print(" MAX REWARD: ", str(round(max(fitness), 2)))
+print(" MEAN REWARD: ", str(round(cp.mean(fitness), 2)))
+print(" MIN REWARD: ", str(round(min(fitness), 2)) + '\n')

 print(" BEST GENOME INDEX: ", str(len(weights)-1))
 print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
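If you want to reproduce the same summary line yourself, note that `max`/`min`/`cp.mean` on a cupy array yield 0-d device arrays. A minimal sketch that converts them to Python floats explicitly before rounding, so nothing relies on implicit conversion:

```python
import cupy as cp

fitness = cp.array([0.1, 0.7, 0.4], dtype=cp.float32)

print(" MAX REWARD: ", round(float(fitness.max()), 2))
print(" MEAN REWARD:", round(float(fitness.mean()), 2))
print(" MIN REWARD: ", round(float(fitness.min()), 2))
```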
pyerualjetwork/visualizations.py
CHANGED
@@ -318,10 +318,12 @@ def draw_activations(x_train, activation):
 elif activation == 'spiral':
     result = af.spiral_activation(x_train)
-
-return result
-
-
+
+try: return result
+except:
+    print('WARNING: error in drawing some activation.')
+    return x_train
+
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):

     from .metrics import metrics, confusion_matrix, roc_curve
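The guarded return makes sense once you notice that `result` is only bound inside the `if/elif` chain: an activation name that matches no branch leaves it undefined, and the bare `return result` raised an UnboundLocalError (a subclass of NameError). A minimal standalone sketch of the same pattern; `draw_like` is a hypothetical stand-in for `draw_activations`, and narrowing the bare `except:` to `NameError` is this sketch's choice:

```python
import numpy as np

def draw_like(x_train, activation):
    if activation == 'relu':
        result = np.maximum(0, x_train)
    # ... further elif branches for other activations ...
    try:
        return result            # NameError if no branch bound `result`
    except NameError:
        print('WARNING: error in drawing some activation.')
        return x_train           # fall back to the untransformed input

x = np.array([-1.0, 2.0])
print(draw_like(x, 'relu'))      # [0. 2.]
print(draw_like(x, 'unknown'))   # warning, then x unchanged
```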
pyerualjetwork/visualizations_cuda.py
CHANGED
@@ -321,7 +321,10 @@ def draw_activations(x_train, activation):
 elif activation == 'spiral':
     result = af.spiral_activation(x_train)

-return result
+try: return result
+except:
+    print('WARNING: error in drawing some activation.')
+    return x_train


 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
{pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.7
+Version: 4.1.8b0
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=WB6frZ8e1IWyGHYt3RE7bsg4yn7Ozrudnf6rk60_H3o,2177
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
@@ -6,19 +6,19 @@ pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvt
 pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
 pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
 pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/memory_operations.py,sha256=
+pyerualjetwork/memory_operations.py,sha256=_Wu9FJc6ozQTPOC2tXfXWPCwUIvPRuDjmLw_McntVSI,13470
 pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/plan.py,sha256=jE3xQUf2h8Vi0GshBktpn4Tyn3IiyJOo2GMZOTPsJQA,35359
+pyerualjetwork/plan_cuda.py,sha256=xdD6CFYiyta93L8VWA9NGXcXjzFzvNYr29XSZ-9wZiQ,35996
+pyerualjetwork/planeat.py,sha256=pVp8ndi5E_muwOTFmlcav70-5LLV5A2yA0_SgURvT08,40236
+pyerualjetwork/planeat_cuda.py,sha256=3Vt5_zHUK4Jt_vW6LugQOy3to8gzQfT0_poPxeJTy68,40253
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
-pyerualjetwork/visualizations.py,sha256=
-pyerualjetwork/visualizations_cuda.py,sha256=
-pyerualjetwork-4.1.
-pyerualjetwork-4.1.
-pyerualjetwork-4.1.
-pyerualjetwork-4.1.
+pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
+pyerualjetwork/visualizations_cuda.py,sha256=9qw46Y4bo67l0nVVF1FSNS8ksyzbIAJdaPDFOhN5J8Y,29188
+pyerualjetwork-4.1.8b0.dist-info/METADATA,sha256=ZW6TS4CJncDtHKSJSuOhiFgyy3vBZVqeExiVXcmcgAU,7795
+pyerualjetwork-4.1.8b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.8b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.8b0.dist-info/RECORD,,
{pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/WHEEL
File without changes
{pyerualjetwork-4.1.7.dist-info → pyerualjetwork-4.1.8b0.dist-info}/top_level.txt
File without changes