pyerualjetwork 4.1.6__py3-none-any.whl → 4.1.7__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
pyerualjetwork/__init__.py CHANGED
@@ -48,7 +48,7 @@ for package_name in package_names:
 
  print(f"PyerualJetwork is ready to use with {err} errors")
 
- __version__ = "4.1.6"
+ __version__ = "4.1.7"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
  def print_version(__version__):
pyerualjetwork/data_operations_cuda.py CHANGED
@@ -64,17 +64,22 @@ def decode_one_hot(encoded_data):
 
 
 
- def split(X, y, test_size, random_state=42, dtype=cp.float32, use_cpu=False):
+ def split(X, y, test_size, random_state=42, dtype=cp.float32, shuffle_in_cpu=False):
  """
  Splits the given X (features) and y (labels) data into training and testing subsets.
 
  Args:
  X (cupy.ndarray): Features data.
+
  y (cupy.ndarray): Labels data.
+
  test_size (float or int): Proportion or number of samples for the test subset.
+
  random_state (int or None): Seed for random state. Default: 42.
+
  dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
- use_cpu (bool): If True, output will be same cpu's split function. Default: False.
+
+ shuffle_in_cpu (bool): If True, the output matches the CPU split function. Default: False. (Use this for a direct comparison with CPU training.)
  Returns:
  tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
  """
@@ -94,7 +99,7 @@ def split(X, y, test_size, random_state=42, dtype=cp.float32, use_cpu=False):
  else:
  raise ValueError("test_size should be float or int.")
 
- if use_cpu:
+ if shuffle_in_cpu:
  indices = np.arange(num_samples)
  np.random.seed(random_state)
  np.random.shuffle(indices)
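
For context, a minimal usage sketch of the renamed flag (the import path follows the RECORD listing below; the toy shapes and values are invented for illustration):

    import cupy as cp
    from pyerualjetwork.data_operations_cuda import split

    # Toy data: 100 samples, 4 features, one-hot labels (all assumed).
    X = cp.random.rand(100, 4).astype(cp.float32)
    y = cp.eye(2, dtype=cp.float32)[cp.random.randint(0, 2, 100)]

    # shuffle_in_cpu=True shuffles with NumPy's RNG, so for the same
    # random_state the split matches the CPU version of the function.
    x_train, x_test, y_train, y_test = split(X, y, test_size=0.2,
                                             random_state=42, shuffle_in_cpu=True)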
@@ -117,7 +122,7 @@ def split(X, y, test_size, random_state=42, dtype=cp.float32, use_cpu=False):
  return x_train, x_test, y_train, y_test
 
 
- def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32, use_cpu=False):
+ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32, shuffle_in_cpu=False):
  """
  Generates synthetic examples to balance classes to the specified number of examples per class.
 
@@ -131,7 +136,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
 
  dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
 
- use_cpu (bool): If True, output will be same cpu's manuel_balancer function. Default: False.
+ shuffle_in_cpu (bool): If True, the output matches the CPU manuel_balancer function. Default: False. (Use this for a direct comparison with CPU training.)
 
  Returns:
  x_balanced -- Balanced input dataset (cupy array format)
@@ -157,7 +162,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
 
  if num_samples > target_samples_per_class:
 
- if use_cpu:
+ if shuffle_in_cpu:
  selected_indices = np.random.choice(
  class_indices.get(), target_samples_per_class, replace=False)
  else:
@@ -179,7 +184,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
 
  for i in range(samples_to_add):
 
- if use_cpu:
+ if shuffle_in_cpu:
  random_indices = np.random.choice(class_indices.get(), 2, replace=False)
  else:
  random_indices = cp.random.choice(class_indices, 2, replace=False)
@@ -187,7 +192,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
  sample1 = x_train[random_indices[0]]
  sample2 = x_train[random_indices[1]]
 
- if use_cpu:
+ if shuffle_in_cpu:
  synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
  else:
  synthetic_sample = sample1 + (sample2 - sample1) * cp.random.rand()
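
The synthetic sample here is a random point on the line segment between two same-class samples. A self-contained NumPy illustration of the interpolation (values invented):

    import numpy as np

    rng = np.random.default_rng(0)
    sample1 = np.array([0.0, 1.0, 2.0], dtype=np.float32)
    sample2 = np.array([2.0, 3.0, 4.0], dtype=np.float32)

    t = rng.random()  # scalar in [0, 1)
    synthetic = sample1 + (sample2 - sample1) * t  # convex combination of the pair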
@@ -208,7 +213,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
  return x_balanced, y_balanced
 
 
- def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
+ def auto_balancer(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
 
  """
  Function to balance the training data across different classes.
@@ -220,7 +225,7 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
 
  dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
 
- use_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False.
+ shuffle_in_cpu (bool): If True, the output matches the CPU auto_balancer function. Default: False. (Use this for a direct comparison with CPU training.)
  Returns:
  tuple: A tuple containing balanced input data and labels.
  """
@@ -250,7 +255,7 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
  for i in tqdm(range(class_count),leave=False, ascii="▱▰",
  bar_format= bar_format, desc='Balancing Data',ncols=70):
  if len(ClassIndices[i]) > MinCount:
- if use_cpu:
+ if shuffle_in_cpu:
  SelectedIndices = np.random.choice(
  ClassIndices[i].get(), MinCount, replace=False)
  else:
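
auto_balancer downsamples every class to the size of the smallest one. A standalone NumPy sketch of that idea (toy labels and variable names are invented):

    import numpy as np

    rng = np.random.default_rng(0)
    labels = np.array([0, 0, 0, 0, 1, 1, 2, 2, 2])  # made-up class labels
    class_indices = {c: np.flatnonzero(labels == c) for c in np.unique(labels)}
    min_count = min(len(idx) for idx in class_indices.values())

    # Keep min_count randomly chosen samples from each class, without replacement.
    balanced = np.concatenate([
        rng.choice(idx, min_count, replace=False) if len(idx) > min_count else idx
        for idx in class_indices.values()
    ])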
@@ -263,7 +268,7 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
  BalancedInputs = [x_train[idx] for idx in BalancedIndices]
  BalancedLabels = [y_train[idx] for idx in BalancedIndices]
 
- if use_cpu:
+ if shuffle_in_cpu:
  permutation = np.random.permutation(len(BalancedInputs))
  else:
  permutation = cp.random.permutation(len(BalancedInputs))
@@ -279,7 +284,7 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
  return BalancedInputs, BalancedLabels
 
 
- def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
+ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
  """
  Generates synthetic examples to balance classes with fewer examples using CuPy.
  Arguments:
@@ -290,7 +295,7 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
 
  dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
 
- use_cpu (bool): If True, output will be same cpu's synthetic_augmentation function. Default: False.
+ shuffle_in_cpu (bool): If True, the output matches the CPU synthetic_augmentation function. Default: False. (Use this for a direct comparison with CPU training.)
 
  Returns:
  x_train_balanced -- Balanced input dataset (cupy array format)
@@ -322,7 +327,7 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
 
  if num_samples < max_class_count:
  while num_samples < max_class_count:
- if use_cpu:
+ if shuffle_in_cpu:
  random_indices = np.random.choice(
  class_indices, 2, replace=False)
  else:
@@ -331,7 +336,7 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
  sample1 = x[random_indices[0]]
  sample2 = x[random_indices[1]]
 
- if use_cpu:
+ if shuffle_in_cpu:
  synthetic_sample = sample1 + \
  (sample2 - sample1) * np.random.rand()
  else:
pyerualjetwork/memory_operations.py CHANGED
@@ -17,15 +17,13 @@ def get_optimal_batch_size_for_cpu(x, data_size_bytes, available_memory):
  The function calculates the optimal batch size for a given data size and available memory based on
  the size of each element.
 
- :param x: `x` is a NumPy array representing the input data for which we want to determine the
- optimal batch size for processing on the CPU
- :param data_size_bytes: The `data_size_bytes` parameter represents the size of the data in bytes
- that you want to process in batches
- :param available_memory: The `available_memory` parameter represents the total memory available on
- the CPU in bytes. This function calculates the optimal batch size for processing data based on the
- provided parameters. Let me know if you need any further assistance or explanation!
- :return: the optimal batch size for a given array `x` based on the available memory and the size of
- each element in bytes.
+ :param x: `x` is a NumPy array representing the input data for which we want to determine the optimal batch size for processing on the CPU
+
+ :param data_size_bytes: The `data_size_bytes` parameter represents the size of the data in bytes that you want to process in batches
+
+ :param available_memory: The `available_memory` parameter represents the total memory available on the CPU in bytes. The function calculates the optimal batch size from these values.
+
+ :return: the optimal batch size for a given array `x` based on the available memory and the size of each element in bytes.
  """
  safe_memory = available_memory * 0.25
  element_size = data_size_bytes / x.size
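
Only the two lines above are visible in this hunk; a hedged sketch of how such a calculation typically completes (the per-sample step and the final division are assumptions, not the library's code):

    import numpy as np

    def optimal_batch_size_sketch(x, data_size_bytes, available_memory):
        safe_memory = available_memory * 0.25      # cap usage at 25% of RAM
        element_size = data_size_bytes / x.size    # bytes per array element
        # Assumed completion: bytes per sample, then how many samples fit.
        bytes_per_sample = element_size * int(np.prod(x.shape[1:]) or 1)
        return max(1, int(safe_memory // bytes_per_sample))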
@@ -37,7 +35,9 @@ def transfer_to_cpu(x, dtype=np.float32):
  by batching the conversion process and ensuring complete GPU memory cleanup.
 
  :param x: Input data to transfer to CPU (CuPy array)
+
  :param dtype: Target NumPy dtype for the output array (default: np.float32)
+
  :return: NumPy array with the specified dtype
  """
  from .ui import loading_bars, initialize_loading_bar
@@ -144,16 +144,11 @@ def transfer_to_gpu(x, dtype=cp.float32):
  The `transfer_to_gpu` function in Python converts input data to GPU arrays, optimizing memory usage by
  batching and handling out-of-memory errors.
 
- :param x: The `x` parameter in the `transfer_to_gpu` function is the input data that you want to transfer to
- the GPU for processing. It can be either a NumPy array or a CuPy array. If it's a NumPy array, the
- function will convert it to a CuPy array and
- :param dtype: The `dtype` parameter in the `transfer_to_gpu` function specifies the data type to which the
- input array `x` should be converted when moving it to the GPU. By default, it is set to
- `cp.float32`, which is a 32-bit floating-point data type provided by the Cu
- :return: The `transfer_to_gpu` function returns the input data `x` converted to a GPU array of type `dtype`
- (default is `cp.float32`). If the input `x` is already a GPU array with the same dtype, it returns
- `x` as is. If the data size of `x` exceeds 25% of the free GPU memory, it processes the data in
- batches to
+ :param x: The `x` parameter in the `transfer_to_gpu` function is the input data that you want to transfer to the GPU for processing. It can be either a NumPy array or a CuPy array. If it's a NumPy array, the function will convert it to a CuPy array and
+
+ :param dtype: The `dtype` parameter in the `transfer_to_gpu` function specifies the data type to which the input array `x` should be converted when moving it to the GPU. By default, it is set to `cp.float32`, which is a 32-bit floating-point data type provided by the CuPy
+
+ :return: The `transfer_to_gpu` function returns the input data `x` converted to a GPU array of type `dtype` (default is `cp.float32`). If the input `x` is already a GPU array with the same dtype, it returns `x` as is. If the data size of `x` exceeds 25% of the free GPU memory, it processes the data in batches to
  """
  from .ui import loading_bars, initialize_loading_bar
  try:
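
A hedged sketch of the batched-transfer behavior the docstring describes (a hypothetical re-implementation, not the library's code; batch_rows is an invented parameter):

    import numpy as np
    import cupy as cp

    def transfer_to_gpu_sketch(x, dtype=cp.float32, batch_rows=4096):
        if isinstance(x, cp.ndarray):
            return x.astype(dtype, copy=False)   # already on the GPU
        out = cp.empty(x.shape, dtype=dtype)     # single target allocation
        for start in range(0, x.shape[0], batch_rows):
            # Copy row batches instead of the whole array at once, keeping
            # peak host-to-device staging memory small.
            out[start:start + batch_rows] = cp.asarray(x[start:start + batch_rows])
        return out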
pyerualjetwork/planeat.py CHANGED
@@ -66,10 +66,10 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
  population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
  population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
 
- return np.array(population_weights), population_activations
+ return np.array(population_weights, dtype=dtype), population_activations
 
 
- def evolve(weights, activation_potentiations, what_gen, y_reward, show_info=False, strategy='cross_over', policy='normal_selective', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.5, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=np.float32):
+ def evolve(weights, activation_potentiations, what_gen, y_reward, show_info=False, strategy='cross_over', policy='normal_selective', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=np.float32):
  """
  Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
  The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
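
The added dtype= argument matters because np.array() otherwise keeps whatever precision the stacked per-genome matrices already have, ignoring a lower-precision request. A small illustration (shapes invented):

    import numpy as np

    population_weights = [np.zeros((3, 3), dtype=np.float32) for _ in range(4)]

    stacked_default = np.array(population_weights)                   # stays float32
    stacked_typed = np.array(population_weights, dtype=np.float16)   # honors the request
    print(stacked_default.dtype, stacked_typed.dtype)                # float32 float16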
@@ -139,7 +139,7 @@ Args:
  WARNING: if you don't understand do NOT change this value. Default is 32.
 
  activation_selection_add_prob (float, optional): The probability of adding an existing activation function for cross over.
- from the genome. Must be in the range [0, 1]. Default is 0.5.
+ from the genome. Must be in the range [0, 1]. Default is 0.7. (WARNING! Higher values make models more complex. Raise this value for faster training.)
 
  activation_selection_change_prob (float, optional): The probability of changing an activation function in the genome for cross over.
  Must be in the range [0, 1]. Default is 0.5.
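
Such probabilities typically act as Bernoulli gates during crossover; a generic, hypothetical illustration of what the raised default means (not the library's code):

    import numpy as np

    rng = np.random.default_rng(42)
    activation_selection_add_prob = 0.7  # new default in 4.1.7 (was 0.5)

    # With probability 0.7 per crossover, an existing activation function
    # would be added to the offspring genome.
    if rng.random() < activation_selection_add_prob:
        print("add an activation function to the offspring")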
pyerualjetwork/planeat_cuda.py CHANGED
@@ -67,10 +67,10 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
  population_weights[i][j,:] = apply_activation(population_weights[i][j,:], population_activations[i])
  population_weights[i][j,:] = normalization(population_weights[i][j,:], dtype=dtype)
 
- return cp.array(population_weights), population_activations
+ return cp.array(population_weights, dtype=dtype), population_activations
 
 
- def evolve(weights, activation_potentiations, what_gen, y_reward, show_info=False, strategy='cross_over', policy='normal_selective', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.5, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=cp.float32):
+ def evolve(weights, activation_potentiations, what_gen, y_reward, show_info=False, strategy='cross_over', policy='normal_selective', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=cp.float32):
  """
  Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
  The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
@@ -140,7 +140,7 @@ Args:
  WARNING: if you don't understand do NOT change this value. Default is 32.
 
  activation_selection_add_prob (float, optional): The probability of adding an existing activation function for cross over.
- from the genome. Must be in the range [0, 1]. Default is 0.5.
+ from the genome. Must be in the range [0, 1]. Default is 0.7. (WARNING! Higher values make models more complex. Raise this value for faster training.)
 
  activation_selection_change_prob (float, optional): The probability of changing an activation function in the genome for cross over.
  Must be in the range [0, 1]. Default is 0.5.
pyerualjetwork-4.1.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.1.6
+ Version: 4.1.7
  Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
pyerualjetwork-4.1.7.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
- pyerualjetwork/__init__.py,sha256=47FoEu3nH5W85OrbFZXsfegly9PKg-oHIAk82AvVCKE,2450
+ pyerualjetwork/__init__.py,sha256=OjubIysSQiH969c2bNU3ssgHdyFk1VYOF1D-dWNn_JQ,2450
  pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
  pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
  pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
- pyerualjetwork/data_operations_cuda.py,sha256=IrLQkyf5FNNy4kfFcYDToueRnMDdXk7W4ufzpgwxA4k,17267
+ pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
  pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
  pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
- pyerualjetwork/memory_operations.py,sha256=g_DU1g_Xx8BXZ253CV_DvhHI65cXaLNT4iBhlPuPN_w,13487
+ pyerualjetwork/memory_operations.py,sha256=TTowQVOa3whwphz9y7ed1ZdVyXjfRDbX1b12cpCTNY0,13471
  pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
  pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
  pyerualjetwork/plan.py,sha256=ZadbCULBnfd8yrE21-shzifnILzQPZ9jEy6amQxuuvw,35251
  pyerualjetwork/plan_cuda.py,sha256=y1YoZQCSXGyLduG-IdcSPk2DPMAYG5G2pOfDefRZw0w,36287
- pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
- pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
+ pyerualjetwork/planeat.py,sha256=oUTIW9ykPd_d0YJ3-zfforD9gXAQTNx-ethXP7ByLBQ,39621
+ pyerualjetwork/planeat_cuda.py,sha256=oFCa6hKLVkihiSQzxyvkwZYwbk4pGhy7yPeNuUd-YI8,39678
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
  pyerualjetwork/visualizations.py,sha256=9naPYMQKpkMcP_GEaBK90FEZAlImT_f-lgRqVCwvcb8,28660
  pyerualjetwork/visualizations_cuda.py,sha256=blOM-VQnAT_qzM3i_OWjL5C1qnUtYctEvja-a_X4Z0w,29085
- pyerualjetwork-4.1.6.dist-info/METADATA,sha256=xRiAQOkHwFGtNVJDRHGgGS6KbFbWm8B3C2dI-dP8GUM,7793
- pyerualjetwork-4.1.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.1.6.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.1.6.dist-info/RECORD,,
+ pyerualjetwork-4.1.7.dist-info/METADATA,sha256=-Netx9Dfim4DWMg5bQ3nl6PAXizHku4b93id2OCD0H4,7793
+ pyerualjetwork-4.1.7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.1.7.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.1.7.dist-info/RECORD,,
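
The sha256 values above follow the wheel RECORD format (PEP 376/427): a URL-safe base64 digest with the trailing padding stripped. A small sketch for recomputing an entry from a file inside an unpacked wheel (the example path is a placeholder):

    import base64
    import hashlib

    def record_hash(path):
        # Hash the file and encode like a RECORD entry: sha256=<urlsafe-b64, no '='>.
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

    # Example (placeholder path): record_hash("pyerualjetwork/__init__.py")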