pyerualjetwork-5.32-py3-none-any.whl → pyerualjetwork-5.33-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  - Contact: tchasancan@gmail.com
  """

- __version__ = "5.32"
+ __version__ = "5.33"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/cpu/ene.py CHANGED
@@ -138,7 +138,7 @@ def evolver(weights,
  weight_mutate_threshold=16,
  weight_mutate_prob=1,
  is_mlp=False,
- save_best_genome=False,
+ save_best_genome=True,
  dtype=np.float32):
  """
  Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
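
For orientation, the new True default means the elite of the previous generation is carried over unchanged at index 0 of the next population (see the assignment hunk around line 475 below). A minimal sketch of that elitism step, not the package's own code:

    import copy
    import numpy as np

    def carry_over_elite(weights, activations, fitness):
        """Illustrative elitism: pin a deep copy of the fittest genome at index 0."""
        best = int(np.argmax(fitness))
        weights[0] = copy.deepcopy(weights[best])           # deep copies so later in-place
        activations[0] = copy.deepcopy(activations[best])   # mutation cannot touch the elite
        return weights, activations
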
@@ -157,7 +157,7 @@ def evolver(weights,
  what_gen (int): The current generation number, used for informational purposes or logging.

  fitness (numpy.ndarray): A 1D array containing the fitness values of each genome.
- The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness based on the `target_fitness` parameter.
+ The array is used to rank the genomes based on their performance. ENE maximizes or minimizes this fitness based on the `target_fitness` parameter.

  weight_evolve (bool, optional): Are weights to be evolves or just activation combinations Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the resulting weights will be exactly the same.

@@ -227,7 +227,7 @@ def evolver(weights,

  is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

- save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default: False
+ save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. (index of best individual: 0) Default: True

  dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
  Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
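
Putting the documented parameters together, a rough usage sketch of an evolver call. The parameter names come from the docstring fragments above; the second positional argument (`activations`) and the `(weights, activations)` return convention are assumptions not confirmed by this diff, so consult the linked user manual before relying on them:

    import numpy as np
    from pyerualjetwork.cpu import ene

    # Hypothetical population: 10 genomes (must be an even number), one weight
    # matrix and one activation-name list per genome.
    weights = [np.random.randn(4, 3).astype(np.float32) for _ in range(10)]
    activations = [['tanh'] for _ in range(10)]
    fitness = np.random.rand(10)

    # weight_evolve=False: only activation combinations evolve; the weights that
    # come back should be exactly the ones passed in (hence the deepcopy backup below).
    weights, activations = ene.evolver(weights, activations,
                                       what_gen=1,
                                       fitness=fitness,
                                       weight_evolve=False,
                                       policy='aggressive',
                                       save_best_genome=True)
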
@@ -314,7 +314,7 @@ def evolver(weights,
  else:
  raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

- if weight_evolve is False: origin_weights = np.copy(weights)
+ if weight_evolve is False: origin_weights = copy.deepcopy(weights)

  if is_mlp:
  activation_mutate_add_prob = 0
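
The np.copy → copy.deepcopy changes in this file all address the same pitfall: the population lives in Python lists and object arrays of per-genome (and, for MLP genomes, per-layer) weight matrices, and np.copy of such a container copies only the container, not the nested arrays, so later in-place mutation would also corrupt the saved backup. A small standalone illustration:

    import copy
    import numpy as np

    # An MLP-style genome: layer matrices of different shapes in an object array.
    genome = np.array([np.zeros((3, 2)), np.ones((2, 2))], dtype=object)

    shallow = np.copy(genome)     # new container, but the layer arrays are shared
    deep = copy.deepcopy(genome)  # layer arrays are duplicated as well

    genome[0][0, 0] = 99.0        # mutate one layer in place
    print(shallow[0][0, 0])       # 99.0 -> the shallow "backup" changed too
    print(deep[0][0, 0])          # 0.0  -> the deep copy is unaffected
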
@@ -336,11 +336,11 @@ def evolver(weights,

  good_weights = weights[slice_center:]
  bad_weights = weights[:slice_center]
- best_weight = np.copy(good_weights[-1]) if is_mlp is False else copy.deepcopy(good_weights[-1])
+ best_weight = copy.deepcopy(good_weights[-1])

  good_activations = list(activations[slice_center:])
  bad_activations = list(activations[:slice_center])
- best_activations = good_activations[-1].copy() if isinstance(good_activations[-1], list) else good_activations[-1]
+ best_activations = copy.deepcopy(good_activations[-1]) if isinstance(good_activations[-1], list) else good_activations[-1]


  ### ENE IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:
@@ -353,23 +353,23 @@ def evolver(weights,
  best_fitness = normalized_fitness[-1]
  epsilon = np.finfo(float).eps

- child_W = np.copy(bad_weights)
- child_act = bad_activations.copy()
+ child_W = copy.deepcopy(bad_weights)
+ child_act = copy.deepcopy(bad_activations)

- mutated_W = np.copy(bad_weights)
- mutated_act = bad_activations.copy()
+ mutated_W = copy.deepcopy(bad_weights)
+ mutated_act = copy.deepcopy(bad_activations)


  for i in range(len(bad_weights)):

  if policy == 'aggressive':
- first_parent_W = np.copy(best_weight)
- first_parent_act = best_activations
+ first_parent_W = copy.deepcopy(best_weight)
+ first_parent_act = copy.deepcopy(best_activations)
  first_parent_fitness = best_fitness

  elif policy == 'explorer':
- first_parent_W = good_weights[i]
- first_parent_act = good_activations[i]
+ first_parent_W = copy.deepcopy(good_weights[i])
+ first_parent_act = copy.deepcopy(good_activations[i])
  first_parent_fitness = normalized_fitness[len(good_weights) + i]

  else: raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")
@@ -475,8 +475,8 @@ def evolver(weights,
  activations = child_act + mutated_act

  if save_best_genome:
- weights[0] = best_weight
- activations[0] = best_activations
+ weights[0] = copy.deepcopy(best_weight)
+ activations[0] = copy.deepcopy(best_activations)

  ### INFO PRINTING CONSOLE

@@ -501,7 +501,7 @@ def evolver(weights,
  print(" ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
  print(" ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
  print(" FITNESS BIAS: ", str(fitness_bias))
- print(" SAVE BEST GENOME: ", str(save_best_genome) + '\n')
+ print(" SAVE BEST GENOME: ", str(save_best_genome) + " (index of best individual: 0)" + '\n')


  print("*** Performance ***")
@@ -664,18 +664,18 @@ def cross_over(first_parent_W,
  decision = dominant_parent_selection(bad_genomes_selection_prob)

  if decision == 'first_parent':
- dominant_parent_W = np.copy(first_parent_W)
+ dominant_parent_W = copy.deepcopy(first_parent_W)
  dominant_parent_act = first_parent_act

- undominant_parent_W = np.copy(second_parent_W)
+ undominant_parent_W = copy.deepcopy(second_parent_W)
  undominant_parent_act = second_parent_act
  succes = second_parent_fitness + epsilon

  elif decision == 'second_parent':
- dominant_parent_W = np.copy(second_parent_W)
+ dominant_parent_W = copy.deepcopy(second_parent_W)
  dominant_parent_act = second_parent_act

- undominant_parent_W = np.copy(first_parent_W)
+ undominant_parent_W = copy.deepcopy(first_parent_W)
  undominant_parent_act = first_parent_act
  succes = first_parent_fitness + epsilon

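dominant_parent_selection itself is not part of this diff; judging only by how its result is used here, a plausible stand-in would be a biased coin flip between the two parents (the real implementation, and the direction of the bias, may differ):

    import random

    def dominant_parent_selection_sketch(bad_genomes_selection_prob):
        """Hypothetical stand-in: decide which parent dominates the cross-over."""
        if random.uniform(0, 1) < bad_genomes_selection_prob:
            return 'second_parent'
        return 'first_parent'

Whichever parent wins, the deepcopy calls above ensure the child can later be mutated in place without writing back into that parent's weights.
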
@@ -713,7 +713,7 @@ def cross_over(first_parent_W,
  if isinstance(dominant_parent_act, str): dominant_parent_act = [dominant_parent_act]
  if isinstance(undominant_parent_act, str): undominant_parent_act = [undominant_parent_act]

- child_act = list(np.copy(dominant_parent_act))
+ child_act = list(copy.deepcopy(dominant_parent_act))

  activation_selection_add_prob = 1 - activation_selection_add_prob # if prob 0.8 (%80) then 1 - 0.8. Because 0-1 random number probably greater than 0.2
  potential_activation_selection_add = random.uniform(0, 1)
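
This hunk also swaps list(np.copy(...)) for list(copy.deepcopy(...)) on the activation list. The diff does not state the motivation, but the two expressions are not equivalent on a plain list of activation names:

    import copy
    import numpy as np

    acts = ['tanh', 'sigmoid']
    via_numpy = list(np.copy(acts))     # elements come back as numpy.str_ scalars
    via_deepcopy = copy.deepcopy(acts)  # stays a plain list of Python str

    print(type(via_numpy[0]).__name__)     # str_
    print(type(via_deepcopy[0]).__name__)  # str
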
pyerualjetwork/cpu/nn.py CHANGED
@@ -295,15 +295,14 @@ def learn(x_train, y_train, optimizer, gen, pop_size, fit_start=True, batch_size

  weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons_copy, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)

- # 0 indexed individual will keep PLAN's learned informations and in later generations it will share other individuals.
- for l in range(1, len(weight_pop[0])):
- original_shape = weight_pop[0][l].shape
+ # 0 indexed individual will keep PLAN's learned informations and in later generations it will share with other individuals.
+ for layer in range(1, len(mlp_W)):
+ row_shape, col_shape = mlp_W[layer].shape

- identity_matrix = np.eye(original_shape[0], original_shape[1], dtype=weight_pop[0][l].dtype)
- weight_pop[0][l] = identity_matrix
-
- for l in range(len(weight_pop)):
- weight_pop[l][0] = np.copy(best_weight)
+ identity_matrix = np.eye(row_shape, col_shape)
+ mlp_W[layer] = identity_matrix
+
+ mlp_W[0] = plan_W

  best_weight = np.array(weight_pop[0], dtype=object)
  final_activations = act_pop[0]
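
The reworked seeding keeps PLAN's learned matrix in the first layer of genome 0's MLP and fills the layers above it with np.eye identities, so, for square layers and ignoring the activations applied between layers, the seeded genome initially reproduces the mapping PLAN already learned. A quick sanity check of that idea (illustrative shapes only):

    import numpy as np

    rng = np.random.default_rng(0)
    plan_W = rng.standard_normal((5, 5))   # stand-in for PLAN's learned weights
    x = rng.standard_normal(5)

    hidden = plan_W @ x          # first MLP layer applies the learned mapping
    out = np.eye(5) @ hidden     # identity layers above pass it through unchanged

    print(np.allclose(out, plan_W @ x))  # True
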
pyerualjetwork/cuda/ene.py CHANGED
@@ -138,7 +138,7 @@ def evolver(weights,
  weight_mutate_threshold=16,
  weight_mutate_prob=1,
  is_mlp=False,
- save_best_genome=False,
+ save_best_genome=True,
  dtype=cp.float32):
  """
  Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -157,7 +157,7 @@ def evolver(weights,
  what_gen (int): The current generation number, used for informational purposes or logging.

  fitness (cupy.ndarray): A 1D array containing the fitness values of each genome.
- The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness based on the `target_fitness` parameter.
+ The array is used to rank the genomes based on their performance. ENE maximizes or minimizes this fitness based on the `target_fitness` parameter.

  weight_evolve (bool, optional): Are weights to be evolves or just activation combinations Default: True. Note: Regardless of whether this parameter is True or False, you must give the evolver function a list of weights equal to the number of activation potentiations. You can create completely random weights if you want. If this parameter is False, the weights entering the evolver function and the resulting weights will be exactly the same.

@@ -227,7 +227,7 @@ def evolver(weights,

  is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

- save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default: False
+ save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. (index of best individual: 0) Default: True

  dtype (cupy.dtype): Data type for the arrays. Default: cp.float32.
  Example: cp.float64 or cp.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
@@ -313,7 +313,7 @@ def evolver(weights,
  else:
  raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

- if weight_evolve is False: origin_weights = cp.copy(weights) if is_mlp else copy.deepcopy(weights)
+ if weight_evolve is False: origin_weights = copy.deepcopy(weights)

  if is_mlp:
  activation_mutate_add_prob = 0
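
Same rationale as on the CPU side, with one CUDA-specific point: copy.deepcopy of a cupy.ndarray yields another device-resident cupy.ndarray, so unifying on deepcopy should not move the backup off the GPU. A quick check, assuming a working CuPy installation:

    import copy
    import cupy as cp

    w = cp.zeros((2, 2), dtype=cp.float32)
    backup = copy.deepcopy(w)

    print(type(backup))           # <class 'cupy.ndarray'> -> still a device array
    w[0, 0] = 1.0                 # in-place edits to the live population...
    print(float(backup[0, 0]))    # 0.0 -> ...do not leak into the backup
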
@@ -345,14 +345,14 @@ def evolver(weights,

  good_weights = weights[slice_center:]
  bad_weights = weights[:slice_center]
- best_weight = cp.copy(good_weights[-1])
+ best_weight = copy.deepcopy(good_weights[-1])

  good_activations = list(activations[slice_center:])
  bad_activations = list(activations[:slice_center])
- best_activations = good_activations[-1].copy() if isinstance(good_activations[-1], list) else good_activations[-1]
+ best_activations = copy.deepcopy(good_activations[-1]) if isinstance(good_activations[-1], list) else good_activations[-1]


- ### PLANEAT IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:
+ ### ENE IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:

  bar_format = loading_bars()[0]

@@ -362,23 +362,23 @@ def evolver(weights,
  best_fitness = normalized_fitness[-1]
  epsilon = cp.finfo(float).eps

- child_W = cp.copy(bad_weights)
- child_act = bad_activations.copy()
+ child_W = copy.deepcopy(bad_weights)
+ child_act = copy.deepcopy(bad_activations)

- mutated_W = cp.copy(bad_weights)
- mutated_act = bad_activations.copy()
+ mutated_W = copy.deepcopy(bad_weights)
+ mutated_act = copy.deepcopy(bad_activations)


  for i in range(len(bad_weights)):

  if policy == 'aggressive':
- first_parent_W = best_weight
- first_parent_act = best_activations
+ first_parent_W = copy.deepcopy(best_weight)
+ first_parent_act = copy.deepcopy(best_activations)
  first_parent_fitness = best_fitness

  elif policy == 'explorer':
- first_parent_W = good_weights[i]
- first_parent_act = good_activations[i]
+ first_parent_W = copy.deepcopy(good_weights[i])
+ first_parent_act = copy.deepcopy(good_activations[i])
  first_parent_fitness = normalized_fitness[len(good_weights) + i]

  else: raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")
@@ -493,8 +493,8 @@ def evolver(weights,
  activations = child_act + mutated_act

  if save_best_genome:
- weights[0] = best_weight
- activations[0] = best_activations
+ weights[0] = copy.deepcopy(best_weight)
+ activations[0] = copy.deepcopy(best_activations)

  ### INFO PRINTING CONSOLE

@@ -519,7 +519,7 @@ def evolver(weights,
  print(" ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
  print(" ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
  print(" FITNESS BIAS: ", str(fitness_bias))
- print(" SAVE BEST GENOME: ", str(save_best_genome) + '\n')
+ print(" SAVE BEST GENOME: ", str(save_best_genome) + " (index of best individual: 0)" + '\n')

  print("*** Performance ***")
  print(" MAX FITNESS: ", str(cp.round(max(fitness), 2)))
@@ -682,18 +682,18 @@ def cross_over(first_parent_W,
  decision = dominant_parent_selection(bad_genomes_selection_prob)

  if decision == 'first_parent':
- dominant_parent_W = cp.copy(first_parent_W)
+ dominant_parent_W = copy.deepcopy(first_parent_W)
  dominant_parent_act = first_parent_act

- undominant_parent_W = cp.copy(second_parent_W)
+ undominant_parent_W = copy.deepcopy(second_parent_W)
  undominant_parent_act = second_parent_act
  succes = second_parent_fitness + epsilon

  elif decision == 'second_parent':
- dominant_parent_W = cp.copy(second_parent_W)
+ dominant_parent_W = copy.deepcopy(second_parent_W)
  dominant_parent_act = second_parent_act

- undominant_parent_W = cp.copy(first_parent_W)
+ undominant_parent_W = copy.deepcopy(first_parent_W)
  undominant_parent_act = first_parent_act
  succes = first_parent_fitness + epsilon

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 5.32
+ Version: 5.33
  Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -1,4 +1,4 @@
- pyerualjetwork/__init__.py,sha256=X-alwoq6f9sVQjlVKzJMev51yAFgX4qoqSKrMGEd2b8,2704
+ pyerualjetwork/__init__.py,sha256=a5zhtKQrVwHbhqgnBuAbKkLi4hC27iGD1Cf1jegr_eU,2704
  pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
  pyerualjetwork/help.py,sha256=Nyi0gHAN9ZnO4wgQLeENt0n7tSCZ3hJmjaJ853eGjCE,831
  pyerualjetwork/issue_solver.py,sha256=3pZTGotS29sy3pIuGQoJFUePibtSzS-tNoU80T_Usgk,3131
@@ -7,22 +7,22 @@ pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
  pyerualjetwork/cpu/__init__.py,sha256=0yAYner_-v7SmT3P7JV2itU8xJUQdQpb40dhAMQiZkc,829
  pyerualjetwork/cpu/activation_functions.py,sha256=zZSoOQ452Ykp_RsHVxklxesJmmFgufyIB4F3WQjudEQ,6689
  pyerualjetwork/cpu/data_ops.py,sha256=5biKr7pqLbJOayHYgGdQV1K5GqKbcOvrbbuAyByuDC8,16154
- pyerualjetwork/cpu/ene.py,sha256=ZLCaCxkpAmFLdxDS2OH-S8fT4jKq4HNVCHgpIufb8lg,44322
+ pyerualjetwork/cpu/ene.py,sha256=1k2_qQydHyrp-3RlvsUu7cFdPa0pN3OcXI6jwIHiWg0,44484
  pyerualjetwork/cpu/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork/cpu/metrics.py,sha256=WhZ8iEqWehaygPRADUlhA5j_Qv3UwqV_eMxpyRVkeVs,6070
  pyerualjetwork/cpu/model_ops.py,sha256=sWsP_7Gfa8_DJ2X7AUrOkeXnz2Eej6573grQQ3CooXM,20295
- pyerualjetwork/cpu/nn.py,sha256=B8V32y0j4a85JBz11Ke_hE8hUp8kv0gQ6LDtaCiASzk,32010
+ pyerualjetwork/cpu/nn.py,sha256=TYLXmVLbbfFCDFA_cH9TSMxgjauDi6d7xfrPzOx6Xwg,31867
  pyerualjetwork/cpu/visualizations.py,sha256=rOQsc-W8b71z7ovXSoF49lx4fmpvlaHLsyj9ejWnhnI,28164
  pyerualjetwork/cuda/__init__.py,sha256=NbqvAS4jlMdoFdXa5_hi5ukXQ5zAZR_5BQ4QAqtiKug,879
  pyerualjetwork/cuda/activation_functions.py,sha256=FmoSAxDr9SGO4nkE6ZflXK4pmvZ0sL3Epe1Lz-3GOVI,6766
  pyerualjetwork/cuda/data_ops.py,sha256=SiNodFNmWyTPY_KnKuAi9biPRdpTAYY3XM01bRSUPCs,18510
- pyerualjetwork/cuda/ene.py,sha256=aSCPr9VFdgK2cxxfwuP7z0jbJL9gkKNM0rgu8ihLarQ,44830
+ pyerualjetwork/cuda/ene.py,sha256=veJRW1KoBFoDEhDJWYr-Y35kQC-Qh6UwURtXeM16S0M,45009
  pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
  pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
  pyerualjetwork/cuda/model_ops.py,sha256=iQPuxmthKxP2GTFLHJppxoU64C6mEpkDW-DsfwFGiuY,21020
  pyerualjetwork/cuda/nn.py,sha256=7rbaIEcmssaFgcionWVRmKijlgFyftVjf-MMNaLO_28,33140
  pyerualjetwork/cuda/visualizations.py,sha256=9l5BhXqXoeopdhLvVGvjH1TKYZb9JdKOsSE2IYD02zs,28569
- pyerualjetwork-5.32.dist-info/METADATA,sha256=1wsSwMOifBtpBpAZE3UnFKdK8P4Bq3DUX8N_sdi-Pe4,8020
- pyerualjetwork-5.32.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-5.32.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-5.32.dist-info/RECORD,,
+ pyerualjetwork-5.33.dist-info/METADATA,sha256=QHnz5cJtj77mPGeUo5ZQcvD1BpFJj5kZPkiCGdZxXv0,8020
+ pyerualjetwork-5.33.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-5.33.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-5.33.dist-info/RECORD,,
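
For reference, each RECORD line lists the file path, a sha256= hash (the urlsafe-base64-encoded SHA-256 digest with trailing '=' padding stripped, per PEP 376 and the wheel spec), and the file size in bytes. A short sketch of how one of these entries can be verified against an unpacked copy of the 5.33 wheel:

    import base64
    import hashlib

    def record_hash(path):
        """Compute a wheel RECORD-style hash string for a file on disk."""
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

    # e.g. record_hash('pyerualjetwork/cpu/ene.py') should match the entry above.
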