pyerualjetwork 4.2.0b6__py3-none-any.whl → 4.2.2__py3-none-any.whl

This diff shows the content changes between two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,61 +1,11 @@
-
-import subprocess
-subprocess.check_call(["pip", "install", 'setuptools==75.6.0'])
-import pkg_resources
-
-print("Auto checking and installation dependencies for PyerualJetwork")
-
-package_names = [
-    'scipy==1.13.1',
-    'tqdm==4.66.4',
-    'seaborn==0.13.2',
-    'pandas==2.2.2',
-    'networkx==3.3',
-    'numpy==1.26.4',
-    'matplotlib==3.9.0',
-    'colorama==0.4.6',
-    'psutil==6.1.1',
-    'cupy-cuda12x==13.3.0'
-]
-
-installed_packages = pkg_resources.working_set
-installed = {pkg.key: pkg.version for pkg in installed_packages}
-err = 0
-
-for package_name in package_names:
-    package_name_only, required_version = package_name.split('==')
-
-    if package_name_only not in installed:
-
-        try:
-            print(f"{package_name} Installing...")
-            subprocess.check_call(["pip", "install", package_name])
-        except Exception as e:
-            err += 1
-            print(f"Error installing {package_name} library, installation continues: {e}")
-    else:
-
-        installed_version = installed[package_name_only]
-        if installed_version != required_version:
-            print(f"Updating {package_name_only} from version {installed_version} to {required_version}...")
-            try:
-                subprocess.check_call(["pip", "install", package_name])
-            except Exception as e:
-                err += 1
-                print(f"Error updating {package_name} library, installation continues: {e}")
-        else:
-            print(f"{package_name} ready.")
-
-print(f"PyerualJetwork is ready to use with {err} errors")
-
-__version__ = "4.2.0b6"
-__update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
+__version__ = "4.2.2"
+__update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
     print(f"PyerualJetwork Version {__version__}" + '\n')
 
 def print_update_notes(__update__):
-    print(f"Update Notes:\n{__update__}")
+    print(f"Notes:\n{__update__}")
 
 print_version(__version__)
 print_update_notes(__update__)
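The headline change here: 4.2.2 removes the import-time dependency auto-installer (the subprocess/pkg_resources block above), so importing the package no longer shells out to pip. A minimal sanity check after installing the new version:

```python
# Minimal check after `pip install pyerualjetwork==4.2.2`.
# Importing the package now only prints the version and update notes;
# it no longer attempts to install or upgrade dependencies.
import pyerualjetwork

assert pyerualjetwork.__version__ == "4.2.2"
```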
pyerualjetwork/data_operations.py CHANGED
@@ -66,9 +66,13 @@ def split(X, y, test_size, random_state=42, dtype=np.float32):
 
    Args:
        X (numpy.ndarray): Features data.
+
        y (numpy.ndarray): Labels data.
+
        test_size (float or int): Proportion or number of samples for the test subset.
+
        random_state (int or None): Seed for random state. Default: 42.
+
        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
    Returns:
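For orientation, a hedged usage sketch of `split` matching the signature in the hunk header; the import path and the returned tuple layout are assumptions, not confirmed by this diff:

```python
# Sketch only: exercises split() as declared in the hunk header above.
import numpy as np
from pyerualjetwork.data_operations import split  # module path assumed

X = np.random.rand(100, 4)
y = np.eye(3)[np.random.randint(0, 3, size=100)]   # one-hot placeholder labels

# Returned tuple assumed to be (x_train, x_test, y_train, y_test).
subsets = split(X, y, test_size=0.2, random_state=42)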
pyerualjetwork/help.py CHANGED
@@ -1,4 +1,5 @@
-from .activation_functions import all_activations
+from activation_functions import all_activations
+
 
 def activation_potentiation():
 
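One observation on this hunk: the import is no longer package-relative, so `activation_functions` now resolves against sys.path rather than inside the pyerualjetwork package. A hedged illustration of the possible consequence:

```python
# Hedged illustration, not verified against the released wheel: with the bare
# import above, Python looks up 'activation_functions' on sys.path instead of
# inside the pyerualjetwork package, so this may raise ModuleNotFoundError
# when the package is used as an installed library.
import pyerualjetwork.help
```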
pyerualjetwork/memory_operations.py CHANGED
@@ -34,13 +34,13 @@ def transfer_to_cpu(x, dtype=np.float32):
    The `transfer_to_cpu` function converts data to a specified data type on the CPU, handling memory constraints
    by batching the conversion process and ensuring complete GPU memory cleanup.
 
-   :param x: Input data to transfer to CPU (CuPy array)
+   param x: Input data to transfer to CPU (CuPy array)
 
-   :param dtype: Target NumPy dtype for the output array (default: np.float32)
+   param dtype: Target NumPy dtype for the output array (default: np.float32)
 
-   :return: NumPy array with the specified dtype
+   return: NumPy array with the specified dtype
    """
-   from ui import loading_bars, initialize_loading_bar
+   from .ui import loading_bars, initialize_loading_bar
    try:
        if isinstance(x, np.ndarray):
            return x.astype(dtype) if x.dtype != dtype else x
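A hedged usage sketch for `transfer_to_cpu`; the module path and CuPy availability are assumptions, not confirmed by this diff:

```python
# Sketch under assumptions: per its docstring, transfer_to_cpu batches the
# conversion and frees GPU memory on the way back to host.
import numpy as np
import cupy as cp
from pyerualjetwork.memory_operations import transfer_to_cpu  # module path assumed

x_gpu = cp.random.rand(4096, 128)
x_cpu = transfer_to_cpu(x_gpu, dtype=np.float32)
assert isinstance(x_cpu, np.ndarray)
```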
pyerualjetwork/model_operations.py CHANGED
@@ -334,8 +334,8 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
        ndarray: Output from the model.
    """
 
-   from .data_operations import standard_scaler
-   from .plan import feed_forward
+   from data_operations import standard_scaler
+   from plan import feed_forward
 
    Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
 
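A hedged call sketch for `predict_model_ram`, using only the parameters visible in the hunk header; the module path and the weight-loading step are hypothetical:

```python
# Sketch only: parameter names follow the hunk header above.
import numpy as np
from pyerualjetwork.model_operations import predict_model_ram  # path assumed

sample = np.random.rand(4)               # one input vector, shape illustrative
W = np.load("plan_weights.npy")          # hypothetical previously saved weights
out = predict_model_ram(sample, W, activation_potentiation=['linear'])
```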
pyerualjetwork/plan.py CHANGED
@@ -16,7 +16,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import numpy as np
-from colorama import Fore
 import math
 
 ### LIBRARY IMPORTS ###
@@ -174,11 +173,11 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-            interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=np.float32):
+            interval=33.33, target_acc=None, target_loss=None,
+            start_this_act=None, start_this_W=None, dtype=np.float32):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
@@ -195,34 +194,34 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
    ```python
-   genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
+   genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                               activation_add_prob=0.85,
-                                                              mutations=False,
-                                                              strategy='cross_over',
+                                                              mutations=True,
+                                                              strategy='aggressive',
                                                               **kwargs)
 
    model = plan.learner(x_train,
                         y_train,
                         optimizer=genetic_optimizer,
+                        fit_start=True,
                         strategy='accuracy',
                         show_history=True,
-                        target_acc=0.94,
+                        gen=15,
+                        batch_size=0.05,
                        interval=16.67)
    ```
 
-   x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
-
-   y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
+   fit_start (bool): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. The fit_start parameter is MANDATORY and must be provided.
 
    strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
    gen (int, optional): The generation count for genetic optimization.
 
-   batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
+   batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-   auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+   early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
 
-   early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
+   auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
    show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
@@ -235,10 +234,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
 
    target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-   except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-   only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
    start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
 
@@ -247,8 +242,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
 
    neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-   target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
 
    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
@@ -257,37 +250,25 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
    """
 
-   print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+   from .planeat import define_genomes
 
-   activation_potentiation = all_activations()
+   data = 'Train'
 
+   activation_potentiation = all_activations()
+   activation_potentiation_len = len(activation_potentiation)
+
    # Pre-checks
 
    x_train = x_train.astype(dtype, copy=False)
    y_train = optimize_labels(y_train, cuda=False)
 
-   if x_test is None and y_test is None:
-       x_test = x_train
-       y_test = y_train
-       data = 'Train'
-   else:
-       x_test = x_test.astype(dtype, copy=False)
-       y_test = optimize_labels(y_test, cuda=False)
-       data = 'Test'
-
-   # Filter activation functions
-   if only_this is not None:
-       activation_potentiation = only_this
-   if except_this is not None:
-       activation_potentiation = [item for item in activation_potentiation if item not in except_this]
    if gen is None:
-       gen = len(activation_potentiation)
+       gen = activation_potentiation_len
 
    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+   if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
+   if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
 
-   if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-   if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
@@ -298,33 +279,27 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
        ncols = 89
 
    # Initialize variables
-   act_pop = []
-   weight_pop = []
-
-   if start_this_act is None and start_this_W is None:
-       best_acc = 0
-   else:
-       act_pop.append(start_this_act)
-       weight_pop.append(start_this_W)
-
-       x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-       model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
-
-       if loss == 'categorical_crossentropy':
-           test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-       else:
-           test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-       best_acc = model[get_acc()]
-       best_loss = test_loss
-
+   best_acc = 0
+   best_f1 = 0
+   best_recall = 0
+   best_precision = 0
    best_acc_per_gen_list = []
    postfix_dict = {}
    loss_list = []
    target_pop = []
 
-   progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+   progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+   if fit_start is False:
+       weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)
+
+       if start_this_act is not None and start_this_W is not None:
+           weight_pop[0] = start_this_W
+           act_pop[0] = start_this_act
+
+   else:
+       weight_pop = []
+       act_pop = []
 
    for i in range(gen):
        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -334,22 +309,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
        progress.last_print_n = 0
        progress.update(0)
 
-       for j in range(len(activation_potentiation)):
+       for j in range(activation_potentiation_len):
 
-           x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+           x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
-           if i == 0:
+           if fit_start is True and i == 0:
                act_pop.append(activation_potentiation[j])
-               W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+               W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
                weight_pop.append(W)
-
-           model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
+
+           model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
            acc = model[get_acc()]
 
            if strategy == 'accuracy': target_pop.append(acc)
 
            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-               precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+               precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
 
                if strategy == 'precision':
                    target_pop.append(precision_score)
@@ -391,22 +366,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                    print(f", Current Activations={final_activations}", end='')
 
                if loss == 'categorical_crossentropy':
-                   test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                   train_loss = categorical_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
                else:
-                   test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                   train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
 
                if batch_size == 1:
-                   postfix_dict[f"{data} Loss"] = test_loss
-                   best_loss = test_loss
+                   postfix_dict[f"{data} Loss"] = train_loss
+                   best_loss = train_loss
                else:
-                   postfix_dict[f"{data} Batch Loss"] = test_loss
+                   postfix_dict[f"{data} Batch Loss"] = train_loss
                    progress.set_postfix(postfix_dict)
-                   best_loss = test_loss
+                   best_loss = train_loss
 
                # Update visualizations during training
                if show_history:
                    gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                   update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                   update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                     best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
                if neurons_history:
@@ -415,7 +390,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                        viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                        y_train[0], viz_objects['neurons']['artists'],
                        data=data, fig1=viz_objects['neurons']['fig'],
-                       acc=best_acc, loss=test_loss)
+                       acc=best_acc, loss=train_loss)
                    )
 
                if neural_web_history:
@@ -440,13 +415,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                    print('\nActivations: ', final_activations)
                    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                   if data == 'Test':
-                       print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                       print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
                    # Display final visualizations
                    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                      test_loss, y_train, interval)
+                                                      train_loss, y_train, interval)
                    return best_weights, best_model[get_preds()], best_acc, final_activations
 
                # Check target loss
@@ -465,13 +437,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                    print('\nActivations: ', final_activations)
                    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                   if data == 'Test':
-                       print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                       print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
                    # Display final visualizations
                    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                      test_loss, y_train, interval)
+                                                      train_loss, y_train, interval)
                    return best_weights, best_model[get_preds()], best_acc, final_activations
 
            progress.update(1)
@@ -479,7 +448,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)
 
-       weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+       weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
        target_pop = []
 
        # Early stopping check
@@ -499,13 +468,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
            print('\nActivations: ', final_activations)
            print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
            print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-           if data == 'Test':
-               print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-               print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
            # Display final visualizations
            display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                              test_loss, y_train, interval)
+                                              train_loss, y_train, interval)
            return best_weights, best_model[get_preds()], best_acc, final_activations
 
    # Final evaluation
@@ -518,15 +484,12 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    else:
        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
-   print('\nActivations: ', act_pop[-1])
+   print('\nActivations: ', final_activations)
    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-   if data == 'Test':
-       print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-       print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
    # Display final visualizations
-   display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval)
+   display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations
 
 
@@ -621,12 +584,12 @@ def evaluate(
    if y_test.dtype != np.uint32:
        y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
 
-   predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=np.float32)
-   real_classes = np.empty(len(x_test), dtype=np.int32)
-   predict_classes = np.empty(len(x_test), dtype=np.int32)
+   predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
+   real_classes = np.empty(len(x_test), dtype=y_test.dtype)
+   predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
 
    true_predict = 0
-   acc_list = np.empty(len(x_test), dtype=np.float32)
+   acc_list = np.empty(len(x_test), dtype=dtype)
 
    if loading_bar_status:
        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
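Taken together, these plan.py hunks change the `learner` contract: `x_test`/`y_test`, `except_this`/`only_this`, and `target_fitness` are removed, `fit_start` becomes a mandatory argument, and all feedback (accuracy, loss, metrics) is computed on training batches. A hedged migration sketch with placeholder data (hyperparameter values are illustrative):

```python
# Migration sketch for plan.learner, 4.2.0b6 -> 4.2.2; the new signature is
# taken from the diff above, the data is a placeholder.
import numpy as np
from pyerualjetwork import plan, planeat

x_train = np.random.rand(200, 16).astype(np.float32)
y_train = np.eye(4)[np.random.randint(0, 4, size=200)]   # one-hot labels

genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                            activation_add_prob=0.85,
                                                            **kwargs)

# 4.2.0b6 (removed): plan.learner(x_train, y_train, genetic_optimizer,
#                                 x_test=..., y_test=..., target_acc=0.94)
model = plan.learner(x_train, y_train,
                     optimizer=genetic_optimizer,
                     fit_start=True,          # now mandatory
                     strategy='accuracy',
                     gen=15,
                     batch_size=0.05)
best_W, best_preds, best_acc, best_activations = model
```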
pyerualjetwork/plan_cuda.py CHANGED
@@ -16,7 +16,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
-from colorama import Fore
 import math
 
 ### LIBRARY IMPORTS ###
@@ -189,11 +188,11 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-            interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=cp.float32, memory='gpu'):
+            interval=33.33, target_acc=None, target_loss=None,
+            start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset.
@@ -210,36 +209,33 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
    ```python
-   genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+   genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
                                                                    activation_add_prob=0.85,
-                                                                   mutations=False,
-                                                                   strategy='cross_over',
+                                                                   mutations=True,
+                                                                   strategy='aggressive',
                                                                    **kwargs)
 
    model = plan_cuda.learner(x_train,
-                       y_train,
-                       optimizer=genetic_optimizer,
-                       strategy='accuracy',
-                       show_history=True,
-                       target_acc=0.94,
-                       interval=16.67)
+                             y_train,
+                             optimizer=genetic_optimizer,
+                             fit_start=True,
+                             strategy='accuracy',
+                             show_history=True,
+                             gen=15,
+                             batch_size=0.05,
+                             interval=16.67)
    ```
-
-   x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
-
-   y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
-
+   fit_start (bool): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. The fit_start parameter is MANDATORY and must be provided.
+
    strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
-   patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
-
    gen (int, optional): The generation count for genetic optimization.
 
-   batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
+   batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-   auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+   early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
 
-   early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
+   auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
    show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
@@ -252,10 +248,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
 
    target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-   except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-   only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
    start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
 
@@ -274,90 +266,65 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
    """
 
-   print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+   from .planeat_cuda import define_genomes
+
+   data = 'Train'
 
    activation_potentiation = all_activations()
+   activation_potentiation_len = len(activation_potentiation)
 
    y_train = optimize_labels(y_train, cuda=True)
 
-   if x_test is None and y_test is None:
-       x_test = x_train
-       y_test = y_train
-       data = 'Train'
-   else:
-       data = 'Test'
-       y_test = optimize_labels(y_test, cuda=True)
-
    if memory == 'gpu':
       x_train = transfer_to_gpu(x_train, dtype=dtype)
       y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
 
-      x_test = transfer_to_gpu(x_test, dtype=dtype)
-      y_test = transfer_to_gpu(y_test, dtype=y_train.dtype)
-
      from .data_operations_cuda import batcher
 
   elif memory == 'cpu':
      x_train = transfer_to_cpu(x_train, dtype=dtype)
      y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
 
-     x_test = transfer_to_cpu(x_test, dtype=dtype)
-     y_test = transfer_to_cpu(y_test, dtype=y_train.dtype)
-
     from .data_operations import batcher
 
   else:
      raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
   if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
+  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
 
-  # Filter activation functions
-  if only_this is not None:
-      activation_potentiation = only_this
-  if except_this is not None:
-      activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-  if gen is None:
-      gen = len(activation_potentiation)
-
-  if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-  if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
  # Initialize visualization components
  viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
  # Initialize progress bar
  if batch_size == 1:
-      ncols = 86
+      ncols = 76
  else:
-      ncols = 99
+      ncols = 89
 
  # Initialize variables
-  act_pop = []
-  weight_pop = []
-
-  if start_this_act is None and start_this_W is None:
-      best_acc = 0
-  else:
-      act_pop.append(start_this_act)
-      weight_pop.append(start_this_W)
-
-      x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-      model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
-
-      if loss == 'categorical_crossentropy':
-          test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-      else:
-          test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-
-      best_acc = model[get_acc()]
-      best_loss = test_loss
-
+  best_acc = 0
+  best_f1 = 0
+  best_recall = 0
+  best_precision = 0
  best_acc_per_gen_list = []
  postfix_dict = {}
  loss_list = []
  target_pop = []
 
-  progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+  progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+  if fit_start is False:
+      weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)
+
+      if start_this_act is not None and start_this_W is not None:
+          weight_pop[0] = start_this_W
+          act_pop[0] = start_this_act
+
+  else:
+      weight_pop = []
+      act_pop = []
 
  for i in range(gen):
      postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -367,22 +334,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      progress.last_print_n = 0
      progress.update(0)
 
-     for j in range(len(activation_potentiation)):
+     for j in range(activation_potentiation_len):
 
-         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-         if i == 0:
+         x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+
+         if fit_start is True and i == 0:
              act_pop.append(activation_potentiation[j])
-             W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+             W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
             weight_pop.append(W)
-
-        model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
-
+
+        model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
        acc = model[get_acc()]
+
       if strategy == 'accuracy': target_pop.append(acc)
 
       elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-          precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+          precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
 
          if strategy == 'precision':
             target_pop.append(precision_score)
@@ -415,31 +382,31 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
         if batch_size == 1:
-            postfix_dict[f"{data} Accuracy"] = best_acc
+            postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 3)
         else:
-            postfix_dict[f"{data} Batch Accuracy"] = acc
+            postfix_dict[f"{data} Batch Accuracy"] = cp.round(best_acc, 3)
         progress.set_postfix(postfix_dict)
 
         if show_current_activations:
             print(f", Current Activations={final_activations}", end='')
 
         if loss == 'categorical_crossentropy':
-            test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+            train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
         else:
-            test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+            train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
 
         if batch_size == 1:
-            postfix_dict[f"{data} Loss"] = test_loss
-            best_loss = test_loss
+            postfix_dict[f"{data} Loss"] = cp.round(train_loss, 3)
+            best_loss = train_loss
         else:
-            postfix_dict[f"{data} Batch Loss"] = test_loss
+            postfix_dict[f"{data} Batch Loss"] = cp.round(train_loss, 3)
            progress.set_postfix(postfix_dict)
-            best_loss = test_loss
+            best_loss = train_loss
 
        # Update visualizations during training
        if show_history:
           gen_list = range(1, len(best_acc_per_gen_list) + 2)
-           update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+           update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                            best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
        if neurons_history:
@@ -448,7 +415,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
            viz_objects['neurons']['row'], viz_objects['neurons']['col'],
            y_train[0], viz_objects['neurons']['artists'],
            data=data, fig1=viz_objects['neurons']['fig'],
-           acc=best_acc, loss=test_loss)
+           acc=best_acc, loss=train_loss)
        )
 
        if neural_web_history:
@@ -473,13 +440,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
            print('\nActivations: ', final_activations)
            print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
            print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-           if data == 'Test':
-               print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-               print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
-
+
            # Display final visualizations
            display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                              test_loss, y_train, interval)
+                                              train_loss, y_train, interval)
            return best_weights, best_model[get_preds()], best_acc, final_activations
 
        # Check target loss
@@ -498,13 +462,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
            print('\nActivations: ', final_activations)
            print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
            print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-           if data == 'Test':
-               print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-               print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
            # Display final visualizations
            display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                              test_loss, y_train, interval)
+                                              train_loss, y_train, interval)
            return best_weights, best_model[get_preds()], best_acc, final_activations
 
        progress.update(1)
@@ -512,7 +473,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    best_acc_per_gen_list.append(best_acc)
    loss_list.append(best_loss)
 
-   weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+   weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
    target_pop = []
 
    # Early stopping check
@@ -532,13 +493,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
        print('\nActivations: ', final_activations)
        print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
        print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-       if data == 'Test':
-           print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-           print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
        # Display final visualizations
        display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                          test_loss, y_train, interval)
+                                          train_loss, y_train, interval)
        return best_weights, best_model[get_preds()], best_acc, final_activations
 
    # Final evaluation
@@ -554,12 +512,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    print('\nActivations: ', final_activations)
    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-   if data == 'Test':
-       print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-       print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
    # Display final visualizations
-   display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval)
+   display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations
 
 
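The plan_cuda.py learner mirrors these changes and keeps its `memory` switch for staging data on the GPU or host. A hedged call sketch assuming a CUDA-capable environment and placeholder data:

```python
# Sketch only: mirrors the plan.py migration above using the CUDA modules.
import cupy as cp
from pyerualjetwork import plan_cuda, planeat_cuda

x_train = cp.random.rand(200, 16).astype(cp.float32)       # placeholder data
y_train = cp.eye(4)[cp.random.randint(0, 4, size=200)]     # one-hot labels

genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
                                                                 activation_add_prob=0.85,
                                                                 **kwargs)

model = plan_cuda.learner(x_train, y_train,
                          optimizer=genetic_optimizer,
                          fit_start=True,
                          gen=15,
                          batch_size=0.05,
                          dtype=cp.float32,
                          memory='gpu')   # or 'cpu' to keep tensors on host
```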
pyerualjetwork/planeat.py CHANGED
@@ -190,7 +190,7 @@ def evolver(weights,
 
    Raises:
        ValueError:
-           - If `policy` is not one of the specified values ('aggresive', 'explorer').
+           - If `policy` is not one of the specified values ('aggressive', 'explorer').
            - If 'strategy' is not one of the specified values ('less_selective', 'normal_selective', 'more_selective')
            - If `cross_over_mode` is not one of the specified values ('tpm').
            - If `bad_genomes_mutation_prob`, `activation_mutate_prob`, or other probability parameters are not in the range 0 and 1.
@@ -222,7 +222,7 @@ def evolver(weights,
 
    Example:
    ```python
-   weights, activation_potentiations = planeat.evolver(weights, activation_potentiations, 1, fitness, show_info=True, strategy='normal_selective', policy='aggresive')
+   weights, activation_potentiations = planeat.evolver(weights, activation_potentiations, 1, fitness, show_info=True, strategy='normal_selective', policy='aggressive')
    ```
 
    - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
@@ -247,8 +247,6 @@ def evolver(weights,
 
    else:
        raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")
-
-   if policy == 'explorer': fitness_bias = 0
 
    if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
        (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
@@ -328,7 +326,7 @@ def evolver(weights,
                                     cross_over_mode=cross_over_mode,
                                     activation_selection_add_prob=activation_selection_add_prob,
                                     activation_selection_change_prob=activation_selection_change_prob,
-                                    activation_selection_rate=activation_selection_rate,
+                                    threshold=activation_selection_rate,
                                     bad_genomes_selection_prob=bad_genomes_selection_prob,
                                     first_parent_fitness=best_fitness,
                                     fitness_bias=fitness_bias,
@@ -351,8 +349,7 @@ def evolver(weights,
                                              activation_change_prob=activation_mutate_change_prob,
                                              weight_mutate_prob=weight_mutate_prob,
                                              threshold=weight_mutate_rate,
-                                             genome_fitness=normalized_fitness[i],
-                                             epsilon=epsilon
+                                             genome_fitness=normalized_fitness[i]
                                              )
 
        elif mutation_prob < bad_genomes_mutation_prob:
@@ -364,10 +361,9 @@ def evolver(weights,
                                              activation_change_prob=activation_mutate_change_prob,
                                              weight_mutate_prob=weight_mutate_prob,
                                              threshold=weight_mutate_rate,
-                                             genome_fitness=normalized_fitness[i],
-                                             epsilon=epsilon
+                                             genome_fitness=normalized_fitness[i]
                                              )
-
+
    if bar_status: progress.update(1)
 
    weights = np.vstack((bad_weights, good_weights))
@@ -397,11 +393,13 @@ def evolver(weights,
        print("  FITNESS BIAS: ", str(fitness_bias))
        print("  ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
+
        print("*** Performance ***")
        print("  MAX FITNESS: ", str(round(max(fitness), 2)))
        print("  MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
        print("  MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')
 
+       print("  BEST GENOME ACTIVATION LENGTH: ", str(len(activation_potentiations[-1])))
        print("  BEST GENOME INDEX: ", str(len(weights)-1))
        print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -487,7 +485,7 @@ def cross_over(first_parent_W,
               cross_over_mode,
               activation_selection_add_prob,
               activation_selection_change_prob,
-              activation_selection_rate,
+              threshold,
               bad_genomes_selection_prob,
               first_parent_fitness,
               second_parent_fitness,
@@ -516,7 +514,7 @@ def cross_over(first_parent_W,
    activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
                                              with one from the second parent.
 
-   activation_selection_rate (float): Determines how quickly activation functions are added or replaced
+   threshold (float): Determines how quickly activation functions are added or replaced
                                       during the crossover process.
 
    bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.
@@ -549,12 +547,12 @@ def cross_over(first_parent_W,
        cross_over_mode='tpm',
        activation_selection_add_prob=0.8,
        activation_selection_change_prob=0.5,
-       activation_selection_rate=0.1,
+       threshold=2,
        bad_genomes_selection_prob=0.7,
        first_parent_fitness=0.9,
        second_parent_fitness=0.85,
        fitness_bias=0.6,
-       epsilon=np.finfo.eps
+       epsilon=np.finfo(float).eps
    )
    ```
    """
@@ -624,8 +622,8 @@ def cross_over(first_parent_W,
 
    if potential_activation_selection_add > activation_selection_add_prob:
 
-       activation_selection_rate = activation_selection_rate / succes
-       new_threshold = activation_selection_rate
+       threshold = threshold / succes
+       new_threshold = threshold
 
        while True:
@@ -635,7 +633,7 @@
            child_act.append(random_undominant_activation)
 
            if len(dominant_parent_act) > new_threshold:
-               new_threshold += activation_selection_rate
+               new_threshold += threshold
                pass
 
            else:
@@ -646,8 +644,8 @@
 
    if potential_activation_selection_change_prob > activation_selection_change_prob:
 
-       activation_selection_rate = activation_selection_rate / succes
-       new_threshold = activation_selection_rate
+       threshold = threshold / succes
+       new_threshold = threshold
 
        while True:
@@ -658,7 +656,7 @@
            child_act[random_index_dominant] = random_undominant_activation
 
            if len(dominant_parent_act) > new_threshold:
-               new_threshold += activation_selection_rate
+               new_threshold += threshold
                pass
 
            else:
@@ -674,8 +672,8 @@ def mutation(weight,
             activation_change_prob,
             weight_mutate_prob,
             threshold,
-             genome_fitness,
-             epsilon):
+             genome_fitness
+             ):
    """
    Performs mutation on the given weight matrix and activation functions.
    - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -699,8 +697,6 @@ def mutation(weight,
    threshold (float): If the value you enter here is equal to the result of input layer * output layer, only a single weight will be mutated during each mutation process. If the value you enter here is half of the result of input layer * output layer, two weights in the weight matrix will be mutated.
 
    genome_fitness (float): Fitness value of genome
-
-   epsilon (float): Small epsilon constant
 
    Returns:
        tuple: A tuple containing:
@@ -730,7 +726,8 @@
    row_end = weight.shape[0]
    col_end = weight.shape[1]
 
-   threshold = threshold * (genome_fitness + epsilon)
+   threshold = threshold * genome_fitness
+   performance_control = 0
    new_threshold = threshold
 
    while True:
@@ -742,10 +739,14 @@
 
        if int(row_end * col_end) > new_threshold:
            new_threshold += threshold
+           performance_control += 1
            pass
 
        else:
            break
+
+       if performance_control >= int(row_end * col_end):
+           break
 
    activation_mutate_prob = 1 - activation_mutate_prob
    potential_activation_mutation = random.uniform(0, 1)
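Across these planeat.py hunks, `evolver` stops zeroing fitness_bias for the 'explorer' policy, the epsilon plumbing is removed from the mutation path, `cross_over`'s rate parameter becomes `threshold`, and `mutation` gains a performance_control counter that bounds its threshold-stepping loop. A standalone sketch of that bounded loop, assuming the names in the diff (this is an illustration, not the library function):

```python
# Illustration of the bounded threshold loop introduced in mutation():
# each pass would mutate one random weight; the counter caps the number of
# passes at the total number of weights, preventing a runaway loop.
def count_weight_mutations(row_end, col_end, threshold):
    new_threshold = threshold
    performance_control = 0
    while True:
        # (the real code mutates weight[random row, random col] here)
        if int(row_end * col_end) > new_threshold:
            new_threshold += threshold
            performance_control += 1
        else:
            break
        if performance_control >= int(row_end * col_end):
            break  # guard: never loop more times than there are weights
    return performance_control

print(count_weight_mutations(8, 4, threshold=0.5))  # capped at 32 passes
```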
pyerualjetwork/planeat_cuda.py CHANGED
@@ -248,8 +248,6 @@ def evolver(weights,
 
    else:
        raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")
-
-   if policy =='explorer': fitness_bias = 0
 
    if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
        (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
@@ -328,7 +326,7 @@ def evolver(weights,
                                     cross_over_mode=cross_over_mode,
                                     activation_selection_add_prob=activation_selection_add_prob,
                                     activation_selection_change_prob=activation_selection_change_prob,
-                                    activation_selection_rate=activation_selection_rate,
+                                    threshold=activation_selection_rate,
                                     bad_genomes_selection_prob=bad_genomes_selection_prob,
                                     first_parent_fitness=best_fitness,
                                     fitness_bias=fitness_bias,
@@ -351,8 +349,7 @@ def evolver(weights,
                                              activation_change_prob=activation_mutate_change_prob,
                                              weight_mutate_prob=weight_mutate_prob,
                                              threshold=weight_mutate_rate,
-                                             genome_fitness=normalized_fitness[i],
-                                             epsilon=epsilon
+                                             genome_fitness=normalized_fitness[i]
                                              )
 
        elif mutation_prob < bad_genomes_mutation_prob:
@@ -364,8 +361,7 @@ def evolver(weights,
                                              activation_change_prob=activation_mutate_change_prob,
                                              weight_mutate_prob=weight_mutate_prob,
                                              threshold=weight_mutate_rate,
-                                             genome_fitness=normalized_fitness[i],
-                                             epsilon=epsilon
+                                             genome_fitness=normalized_fitness[i]
                                              )
 
    if bar_status: progress.update(1)
@@ -401,6 +397,7 @@ def evolver(weights,
        print("  MEAN REWARD: ", str(cp.round(cp.mean(fitness), 2)))
        print("  MIN REWARD: ", str(cp.round(min(fitness), 2)) + '\n')
 
+       print("  BEST GENOME ACTIVATION LENGTH: ", str(len(activation_potentiations)))
        print("  BEST GENOME INDEX: ", str(len(weights)-1))
        print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -490,7 +487,7 @@ def cross_over(first_parent_W,
               cross_over_mode,
               activation_selection_add_prob,
               activation_selection_change_prob,
-              activation_selection_rate,
+              threshold,
               bad_genomes_selection_prob,
               first_parent_fitness,
               second_parent_fitness,
@@ -519,7 +516,7 @@ def cross_over(first_parent_W,
    activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
                                              with one from the second parent.
 
-   activation_selection_rate (float): Determines how quickly activation functions are added or replaced
+   threshold (float): Determines how quickly activation functions are added or replaced
                                       during the crossover process.
 
    bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.
@@ -552,12 +549,12 @@ def cross_over(first_parent_W,
        cross_over_mode='tpm',
        activation_selection_add_prob=0.8,
        activation_selection_change_prob=0.5,
-       activation_selection_rate=0.1,
+       threshold=2,
        bad_genomes_selection_prob=0.7,
        first_parent_fitness=0.9,
        second_parent_fitness=0.85,
        fitness_bias=0.6,
-       epsilon=cp.finfo.eps
+       epsilon=cp.finfo(float).eps
    )
    ```
    """
@@ -628,8 +625,8 @@ def cross_over(first_parent_W,
 
    if potential_activation_selection_add > activation_selection_add_prob:
 
-       activation_selection_rate = activation_selection_rate / succes
-       new_threshold = activation_selection_rate
+       threshold = threshold / succes
+       new_threshold = threshold
 
  while True:
635
632
 
@@ -639,7 +636,7 @@ def cross_over(first_parent_W,
639
636
  child_act.append(random_undominant_activation)
640
637
 
641
638
  if len(dominant_parent_act) > new_threshold:
642
- new_threshold += activation_selection_rate
639
+ new_threshold += threshold
643
640
  pass
644
641
 
645
642
  else:
@@ -650,8 +647,8 @@ def cross_over(first_parent_W,
650
647
 
651
648
  if potential_activation_selection_change_prob > activation_selection_change_prob:
652
649
 
653
- activation_selection_rate = activation_selection_rate / succes
654
- new_threshold = activation_selection_rate
650
+ threshold = threshold / succes
651
+ new_threshold = threshold
655
652
 
656
653
  while True:
657
654
 
@@ -662,7 +659,7 @@ def cross_over(first_parent_W,
662
659
  child_act[random_index_dominant] = random_undominant_activation
663
660
 
664
661
  if len(dominant_parent_act) > new_threshold:
665
- new_threshold += activation_selection_rate
662
+ new_threshold += threshold
666
663
  pass
667
664
 
668
665
  else:
@@ -679,8 +676,7 @@ def mutation(weight,
679
676
  activation_change_prob,
680
677
  weight_mutate_prob,
681
678
  threshold,
682
- genome_fitness,
683
- epsilon):
679
+ genome_fitness):
684
680
  """
685
681
  Performs mutation on the given weight matrix and activation functions.
686
682
  - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -705,7 +701,6 @@ def mutation(weight,
705
701
 
706
702
  genome_fitness (float): Fitness value of genome
707
703
 
708
- epsilon (float): Small epsilon constant
709
704
 
710
705
  Returns:
711
706
  tuple: A tuple containing:
@@ -735,9 +730,10 @@ def mutation(weight,
735
730
  row_end = weight.shape[0]
736
731
  col_end = weight.shape[1]
737
732
 
738
- threshold = threshold * (genome_fitness + epsilon)
733
+ threshold = threshold * genome_fitness
739
734
  new_threshold = threshold
740
-
735
+ performance_control = 0
736
+
741
737
  while True:
742
738
 
743
739
  selected_row = int(random.uniform(start, row_end))
@@ -747,10 +743,14 @@ def mutation(weight,
747
743
 
748
744
  if int(row_end * col_end) > new_threshold:
749
745
  new_threshold += threshold
746
+ performance_control += 1
750
747
  pass
751
748
 
752
749
  else:
753
750
  break
751
+
752
+ if performance_control >= int(row_end * col_end):
753
+ break
754
754
 
755
755
  activation_mutate_prob = 1 - activation_mutate_prob
756
756
  potential_activation_mutation = random.uniform(0, 1)
@@ -1,7 +1,7 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyerualjetwork
3
- Version: 4.2.0b6
4
- Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
3
+ Version: 4.2.2
4
 + Summary: PyerualJetwork is a machine learning library with GPU (CUDA) acceleration, written in Python for professionals and researchers. It includes the PLAN algorithm, the PLANEAT algorithm (genetic optimization), data pre-processing, and memory management utilities.
5
5
  Author: Hasan Can Beydili
6
6
  Author-email: tchasancan@gmail.com
7
7
  Keywords: model evaluation,classification,potentiation learning artificial neural networks,NEAT,genetic algorithms,reinforcement learning,neural networks
@@ -0,0 +1,24 @@
1
+ pyerualjetwork/__init__.py,sha256=UMtN3yCygX5xQf18GGEkRF7UerSr7DM54nxBu7kvc7U,639
2
+ pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
3
+ pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
4
+ pyerualjetwork/data_operations.py,sha256=pb5CqJ0Th6fCjTNMCtqQMiwH3KezTxAijacglsKUxmY,14730
5
+ pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
6
+ pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
7
+ pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
8
+ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
9
+ pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
10
+ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
11
+ pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
12
+ pyerualjetwork/model_operations.py,sha256=RKqnh7-MByFosxqme4q4jC1lOndX26O-OVXYV6ZxoEE,12965
13
+ pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
14
+ pyerualjetwork/plan.py,sha256=YOBF2CqGu400Zk6xuraP0X8WzMNyejpZc5tdVV4dEvE,32219
15
+ pyerualjetwork/plan_cuda.py,sha256=OKK0pmJYLQd5-dJ1aLiyWiZRBmoUp1zBkFxRcxnWBVI,33610
16
+ pyerualjetwork/planeat.py,sha256=hMSyrSPipOxKgOqyoAiZtniVgxPQxc4rRsvEEMOS2Ng,40757
17
+ pyerualjetwork/planeat_cuda.py,sha256=9uopmM-gTZpSb0EOExrOZPT8FF5BqDdEfCX0zYQb9QU,40712
18
+ pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
19
+ pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
20
+ pyerualjetwork/visualizations_cuda.py,sha256=F60vQ92AXlMgBka3InXnOtGoM25vQJAlBIU2AlYTwks,29200
21
+ pyerualjetwork-4.2.2.dist-info/METADATA,sha256=cUOm0ZcV8oZqpFZ7RuUWnt8nRcBFhSIFH1GNfFDPF4c,7912
22
+ pyerualjetwork-4.2.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
23
+ pyerualjetwork-4.2.2.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
24
+ pyerualjetwork-4.2.2.dist-info/RECORD,,
@@ -1,24 +0,0 @@
1
- pyerualjetwork/__init__.py,sha256=Kw8tdzAvdhcMyujbsOF0VTVKE5wB-uqKBZ4AH8iNlcQ,2177
2
- pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
3
- pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
4
- pyerualjetwork/data_operations.py,sha256=HjyW2QE18age6J8iG0jpbwqGOylL_nM-vE2CLbP9Wes,14690
5
- pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
6
- pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
7
- pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
8
- pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
9
- pyerualjetwork/memory_operations.py,sha256=_Wu9FJc6ozQTPOC2tXfXWPCwUIvPRuDjmLw_McntVSI,13470
10
- pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
11
- pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
12
- pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
13
- pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
14
- pyerualjetwork/plan.py,sha256=EobwajGSIgbOujkzDKb-Kea0LGRHqpK3Xy1Le8VBAe8,34422
15
- pyerualjetwork/plan_cuda.py,sha256=iCcAHLzVw_VyjhkFHXzBWiedwbnpI1MCXNJgSDgZxWw,36065
16
- pyerualjetwork/planeat.py,sha256=EtmOUCRmkXGuSj35fU5Y-gvBsRodVlMsEgvbrXIzY2A,40997
17
- pyerualjetwork/planeat_cuda.py,sha256=XkFbQF7pmPRWCQTacbmIab8yWkq-6S3dM-N1ehxzSvk,40963
18
- pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
19
- pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
20
- pyerualjetwork/visualizations_cuda.py,sha256=F60vQ92AXlMgBka3InXnOtGoM25vQJAlBIU2AlYTwks,29200
21
- pyerualjetwork-4.2.0b6.dist-info/METADATA,sha256=-m0uyY5sgobRog9idkhbeSHS5l9h_qDoKywhbG2sAes,7795
22
- pyerualjetwork-4.2.0b6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
23
- pyerualjetwork-4.2.0b6.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
24
- pyerualjetwork-4.2.0b6.dist-info/RECORD,,