pyerualjetwork 4.1.8b5__py3-none-any.whl → 4.1.8b7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -48,7 +48,7 @@ for package_name in package_names:
 
     print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.8b5"
+__version__ = "4.1.8b7"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
pyerualjetwork/plan.py CHANGED
@@ -171,7 +171,7 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
@@ -190,9 +190,9 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
 
     y_train (array-like): Labels for training data. one-hot encoded.
 
-    optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizator = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    genetic_optimizator = lambda *args, **kwargs: planeat.evolve(*args,
+    genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
                                                                 activation_add_prob=0.85,
                                                                 mutations=False,
                                                                 strategy='cross_over',
@@ -200,7 +200,7 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
 
     model = plan.learner(x_train,
                          y_train,
-                         optimizator=genetic_optimizator,
+                         optimizer=genetic_optimizer,
                          strategy='accuracy',
                          show_history=True,
                          target_acc=0.94,
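
The rename above changes a public keyword argument, so call sites that pass it by name must be updated; positional callers are unaffected since the parameter keeps its position. A minimal before/after sketch, using synthetic one-hot data and the hyperparameter quoted in the docstring (sizes and values here are illustrative assumptions, not from the package):

```python
import numpy as np
from pyerualjetwork import plan, planeat

# Hypothetical one-hot dataset: 20 samples, 4 features, 2 classes.
x_train = np.random.rand(20, 4)
y_train = np.eye(2)[np.random.randint(0, 2, 20)]

# Wrapper around planeat.evolve, as the docstring recommends.
genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
                                                           activation_add_prob=0.85,
                                                           **kwargs)

# 4.1.8b5 and earlier: plan.learner(..., optimizator=genetic_optimizer, ...)
model = plan.learner(x_train, y_train,
                     optimizer=genetic_optimizer,  # keyword renamed in 4.1.8b7
                     strategy='accuracy',
                     gen=5)
```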
@@ -337,7 +337,6 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
     target_pop = []
 
     for i in range(len(activation_potentiation)):
-        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
 
         if i == 0 and start_this_act is not None:
             act_pop[0] = activation_potentiation[i]
@@ -363,7 +362,8 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
         if strategy == 'recall': target_pop.append(recall_score)
         if strategy == 'f1': target_pop.append(f1_score)
 
-        weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, 0, np.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
+        print(f"\rPre-Fit {i}/{len(activation_potentiation)}, {data} {strategy}: {target_pop[-1]}",end='')
+
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
@@ -374,7 +374,7 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
         progress.last_print_n = 0
         progress.update(0)
 
-        weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
         target_pop = []
 
         for j in range(len(activation_potentiation)):
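
Taken together, these plan.py hunks restructure the training flow: the evolve call that previously ran during the pre-fit pass (hard-coded to generation 0) is gone, the pre-fit loop now only reports each candidate's score, and evolution happens once per generation with `target_pop` cleared after every call. Below is a self-contained toy sketch of the resulting control flow; `evaluate_candidate` and the `optimizer` body are hypothetical stand-ins, not library code:

```python
import numpy as np

def evaluate_candidate(act):
    return float(np.random.rand())  # stand-in for the evaluate/score step

def optimizer(weights, acts, gen_index, fitness, target_fitness='max', bar_status=False):
    order = np.argsort(fitness)     # toy "evolve": keep the fitter half, duplicated
    keep = order[len(order) // 2:]
    return weights[np.concatenate([keep, keep])], [acts[int(i)] for i in keep] * 2

activation_potentiation = ['linear', 'tanh', 'relu', 'sigmoid']
act_pop = list(activation_potentiation)
weight_pop = np.random.rand(4, 3)
target_pop, gen, dtype = [], 3, np.float32

# Pre-fit pass (new behavior): score and report every candidate, but do not evolve yet.
for i, act in enumerate(activation_potentiation):
    target_pop.append(evaluate_candidate(act))
    print(f"\rPre-Fit {i}/{len(activation_potentiation)}, accuracy: {target_pop[-1]:.3f}", end='')

# Generation loop: one evolve call per generation, then the scores are rebuilt.
for i in range(gen):
    weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i,
                                    np.array(target_pop, dtype=dtype, copy=False),
                                    target_fitness='max', bar_status=False)
    target_pop = [evaluate_candidate(a) for a in act_pop]
```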
pyerualjetwork/plan_cuda.py CHANGED
@@ -14,6 +14,7 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
+import numpy as np
 from colorama import Fore
 import math
 
@@ -187,7 +188,7 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
@@ -206,9 +207,9 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
 
     y_train (array-like): Labels for training data.
 
-    optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    genetic_optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+    genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
                                                                      activation_add_prob=0.85,
                                                                      mutations=False,
                                                                      strategy='cross_over',
@@ -216,7 +217,7 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
 
     model = plan_cuda.learner(x_train,
                               y_train,
-                              optimizator=genetic_optimizator,
+                              optimizer=genetic_optimizer,
                               strategy='accuracy',
                               show_history=True,
                               target_acc=0.94,
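
The CUDA path gets the identical rename. A parallel sketch with planeat_cuda, again with synthetic data; it assumes a working CuPy/CUDA installation and that `plan_cuda.learner` accepts host arrays, as its "array-like" docstring suggests:

```python
import numpy as np
from pyerualjetwork import plan_cuda, planeat_cuda

x_train = np.random.rand(20, 4)                   # hypothetical dataset
y_train = np.eye(2)[np.random.randint(0, 2, 20)]  # one-hot labels

genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
                                                                activation_add_prob=0.85,
                                                                **kwargs)

model = plan_cuda.learner(x_train, y_train,
                          optimizer=genetic_optimizer,  # was `optimizator` before 4.1.8b7
                          strategy='accuracy',
                          gen=5)
```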
@@ -317,9 +318,9 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
 
     # Initialize progress bar
     if batch_size == 1:
-        ncols = 76
+        ncols = 86
     else:
-        ncols = 89
+        ncols = 99
 
     # Initialize variables
     act_pop = []
@@ -332,7 +333,6 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
         weight_pop.append(start_this_W)
 
         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
         model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
 
         if loss == 'categorical_crossentropy':
@@ -349,7 +349,6 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
     target_pop = []
 
     for i in range(len(activation_potentiation)):
-        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
 
         if i == 0 and start_this_act is not None:
             act_pop[0] = activation_potentiation[i]
@@ -375,9 +374,8 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
         if strategy == 'recall': target_pop.append(recall_score)
         if strategy == 'f1': target_pop.append(f1_score)
 
-
+        print(f"\rPre-Fit {i}/{len(activation_potentiation)}, {data} {strategy}: {target_pop[-1]}",end='')
 
-    weight_pop, act_pop = optimizator(cp.array(weight_pop, dtype=dtype), act_pop, 0, cp.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
@@ -388,6 +386,9 @@ def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='a
         progress.last_print_n = 0
         progress.update(0)
 
+        weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
+
         for j in range(len(activation_potentiation)):
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
pyerualjetwork/planeat_cuda.py CHANGED
@@ -230,17 +230,15 @@ Example:
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")
 
-    sort_indices = cp.argsort(fitness)
-
     ### FITNESS LIST IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
-    if target_fitness == 'max': sort_indices = np.argsort(fitness)
-    elif target_fitness == 'min': sort_indices = np.argsort(-fitness)
+    if target_fitness == 'max': sort_indices = cp.argsort(fitness)
+    elif target_fitness == 'min': sort_indices = cp.argsort(-fitness)
 
     fitness = fitness[sort_indices]
     weights = weights[sort_indices]
 
-    activation_potentiations = [activation_potentiations[i] for i in sort_indices]
+    activation_potentiations = [activation_potentiations[int(i)] for i in sort_indices]
 
     ### GENOMES ARE DIVIDED INTO TWO GROUPS: GOOD GENOMES AND BAD GENOMES:
 
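Two details changed in this hunk: the sort now uses cp.argsort, keeping the index array on the GPU instead of mixing in np.argsort, and each index is cast with int() before subscripting a plain Python list. A standalone sketch of the corrected pattern with toy values (requires CuPy and a CUDA device):

```python
import cupy as cp

fitness = cp.asarray([0.2, 0.9, 0.5, 0.7])  # toy fitness scores
weights = cp.random.rand(4, 3)              # toy genome weights
activation_potentiations = ['linear', 'tanh', 'relu', 'sigmoid']
target_fitness = 'max'

# Ascending sort puts the best genome last, matching the library's convention.
if target_fitness == 'max':
    sort_indices = cp.argsort(fitness)
elif target_fitness == 'min':
    sort_indices = cp.argsort(-fitness)

# CuPy arrays can be fancy-indexed by a CuPy index array directly.
fitness = fitness[sort_indices]
weights = weights[sort_indices]

# A plain Python list cannot, so each device scalar is cast to a Python int first.
activation_potentiations = [activation_potentiations[int(i)] for i in sort_indices]
print(activation_potentiations)             # ['linear', 'relu', 'sigmoid', 'tanh']
```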
@@ -355,9 +353,9 @@ Example:
     print(" ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
     print("*** Performance ***")
-    print(" MAX REWARD: ", str(round(max(fitness), 2)))
-    print(" MEAN REWARD: ", str(round(cp.mean(fitness), 2)))
-    print(" MIN REWARD: ", str(round(min(fitness), 2)) + '\n')
+    print(" MAX REWARD: ", str(cp.round(max(fitness), 2)))
+    print(" MEAN REWARD: ", str(cp.round(cp.mean(fitness), 2)))
+    print(" MIN REWARD: ", str(cp.round(min(fitness), 2)) + '\n')
 
     print(" BEST GENOME INDEX: ", str(len(weights)-1))
     print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
pyerualjetwork-4.1.8b5.dist-info/METADATA → pyerualjetwork-4.1.8b7.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.8b5
+Version: 4.1.8b7
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
pyerualjetwork-4.1.8b5.dist-info/RECORD → pyerualjetwork-4.1.8b7.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=TjNWEkaJUK3BCnAwlidyDnpk-xNRJG1jekVTq5IB5vk,2177
+pyerualjetwork/__init__.py,sha256=1nfR13uyGk9RA12z_vzmGRlH3he4Y2fX4bDnLnZgmYY,2177
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=K24krCR7oN2fH0JLM9i_wtd4NRqLV608tX1bcQKbjRQ,35553
-pyerualjetwork/plan_cuda.py,sha256=3rZvnuv6zF0Wns8xwT0jH1ySOKYXzSUMvg2FI8beLys,36181
+pyerualjetwork/plan.py,sha256=RtvLjszctY3aHbV1DinGfFVPPjm_tuXJOeMeoM4-pWM,35408
+pyerualjetwork/plan_cuda.py,sha256=oc1Fim1-SQfA0-0ctu4PvmzsTn0Iz2Z-XSmuFc5Wkeg,36247
 pyerualjetwork/planeat.py,sha256=VtWtWndbKoFNYTWd1EsyKBV4Vp5U6cc7uWDgQ4WjHqo,40248
-pyerualjetwork/planeat_cuda.py,sha256=6cizuh06kgq8x8dPqVutbgpgrpM76cF_XWRtUVQ9U1k,40265
+pyerualjetwork/planeat_cuda.py,sha256=fSn28ZbxctPvBjpKgtv_uGwwUdTEXkBizy76mMlZYJ0,40237
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
 pyerualjetwork/visualizations_cuda.py,sha256=9qw46Y4bo67l0nVVF1FSNS8ksyzbIAJdaPDFOhN5J8Y,29188
-pyerualjetwork-4.1.8b5.dist-info/METADATA,sha256=NamWj41_Jt1lC-dRwfJPK5J3goq2OtDfo9YDdNM8nSg,7795
-pyerualjetwork-4.1.8b5.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-4.1.8b5.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-4.1.8b5.dist-info/RECORD,,
+pyerualjetwork-4.1.8b7.dist-info/METADATA,sha256=-SWnXD8f5SB85DpF-beH6drOdrIv8xC_zoljAznyFUk,7795
+pyerualjetwork-4.1.8b7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.8b7.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.8b7.dist-info/RECORD,,