pyerualjetwork 4.1.0__py3-none-any.whl → 4.1.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -47,7 +47,7 @@ for package_name in package_names:
47
47
 
48
48
  print(f"PyerualJetwork is ready to use with {err} errors")
49
49
 
50
- __version__ = "4.1.0"
50
+ __version__ = "4.1.2"
51
51
  __update__ = "* Note: CUDA modules need cupy. Enter this command in your terminal: 'pip install cupy-cuda12x' or your cuda version.\n* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
52
52
 
53
53
  def print_version(__version__):
@@ -218,15 +218,12 @@ def scaled_cubic(x, alpha=1.0):
218
218
  def sine_offset(x, beta=0.0):
219
219
  return np.sin(x + beta)
220
220
 
221
-
222
-
223
- def safe_aggregate(current_sum, new_value):
221
+ def safe_add(current_sum, new_value):
224
222
  try:
225
223
  return current_sum + new_value
226
224
  except OverflowError:
227
225
  return np.array(current_sum) + np.array(new_value)
228
226
 
229
-
230
227
  def apply_activation(Input, activation_list):
231
228
  """
232
229
  Applies a sequence of activation functions to the input.
@@ -244,93 +241,93 @@ def apply_activation(Input, activation_list):
244
241
  for i in range(len(activation_list)):
245
242
  try:
246
243
  if activation_list[i] == 'sigmoid':
247
- Input = safe_aggregate(Input, Sigmoid(origin_input))
244
+ Input = safe_add(Input, Sigmoid(origin_input))
248
245
  elif activation_list[i] == 'swish':
249
- Input = safe_aggregate(Input, swish(origin_input))
246
+ Input = safe_add(Input, swish(origin_input))
250
247
  elif activation_list[i] == 'mod_circular':
251
- Input = safe_aggregate(Input, modular_circular_activation(origin_input))
248
+ Input = safe_add(Input, modular_circular_activation(origin_input))
252
249
  elif activation_list[i] == 'tanh_circular':
253
- Input = safe_aggregate(Input, tanh_circular_activation(origin_input))
250
+ Input = safe_add(Input, tanh_circular_activation(origin_input))
254
251
  elif activation_list[i] == 'leaky_relu':
255
- Input = safe_aggregate(Input, leaky_relu(origin_input))
252
+ Input = safe_add(Input, leaky_relu(origin_input))
256
253
  elif activation_list[i] == 'relu':
257
- Input = safe_aggregate(Input, Relu(origin_input))
254
+ Input = safe_add(Input, Relu(origin_input))
258
255
  elif activation_list[i] == 'softplus':
259
- Input = safe_aggregate(Input, softplus(origin_input))
256
+ Input = safe_add(Input, softplus(origin_input))
260
257
  elif activation_list[i] == 'elu':
261
- Input = safe_aggregate(Input, elu(origin_input))
258
+ Input = safe_add(Input, elu(origin_input))
262
259
  elif activation_list[i] == 'gelu':
263
- Input = safe_aggregate(Input, gelu(origin_input))
260
+ Input = safe_add(Input, gelu(origin_input))
264
261
  elif activation_list[i] == 'selu':
265
- Input = safe_aggregate(Input, selu(origin_input))
262
+ Input = safe_add(Input, selu(origin_input))
266
263
  elif activation_list[i] == 'tanh':
267
- Input = safe_aggregate(Input, tanh(origin_input))
264
+ Input = safe_add(Input, tanh(origin_input))
268
265
  elif activation_list[i] == 'sinakt':
269
- Input = safe_aggregate(Input, sinakt(origin_input))
266
+ Input = safe_add(Input, sinakt(origin_input))
270
267
  elif activation_list[i] == 'p_squared':
271
- Input = safe_aggregate(Input, p_squared(origin_input))
268
+ Input = safe_add(Input, p_squared(origin_input))
272
269
  elif activation_list[i] == 'sglu':
273
- Input = safe_aggregate(Input, sglu(origin_input, alpha=1.0))
270
+ Input = safe_add(Input, sglu(origin_input, alpha=1.0))
274
271
  elif activation_list[i] == 'dlrelu':
275
- Input = safe_aggregate(Input, dlrelu(origin_input))
272
+ Input = safe_add(Input, dlrelu(origin_input))
276
273
  elif activation_list[i] == 'exsig':
277
- Input = safe_aggregate(Input, exsig(origin_input))
274
+ Input = safe_add(Input, exsig(origin_input))
278
275
  elif activation_list[i] == 'sin_plus':
279
- Input = safe_aggregate(Input, sin_plus(origin_input))
276
+ Input = safe_add(Input, sin_plus(origin_input))
280
277
  elif activation_list[i] == 'acos':
281
- Input = safe_aggregate(Input, acos(origin_input, alpha=1.0, beta=0.0))
278
+ Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
282
279
  elif activation_list[i] == 'gla':
283
- Input = safe_aggregate(Input, gla(origin_input, alpha=1.0, mu=0.0))
280
+ Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
284
281
  elif activation_list[i] == 'srelu':
285
- Input = safe_aggregate(Input, srelu(origin_input))
282
+ Input = safe_add(Input, srelu(origin_input))
286
283
  elif activation_list[i] == 'qelu':
287
- Input = safe_aggregate(Input, qelu(origin_input))
284
+ Input = safe_add(Input, qelu(origin_input))
288
285
  elif activation_list[i] == 'isra':
289
- Input = safe_aggregate(Input, isra(origin_input))
286
+ Input = safe_add(Input, isra(origin_input))
290
287
  elif activation_list[i] == 'waveakt':
291
- Input = safe_aggregate(Input, waveakt(origin_input))
288
+ Input = safe_add(Input, waveakt(origin_input))
292
289
  elif activation_list[i] == 'arctan':
293
- Input = safe_aggregate(Input, arctan(origin_input))
290
+ Input = safe_add(Input, arctan(origin_input))
294
291
  elif activation_list[i] == 'bent_identity':
295
- Input = safe_aggregate(Input, bent_identity(origin_input))
292
+ Input = safe_add(Input, bent_identity(origin_input))
296
293
  elif activation_list[i] == 'sech':
297
- Input = safe_aggregate(Input, sech(origin_input))
294
+ Input = safe_add(Input, sech(origin_input))
298
295
  elif activation_list[i] == 'softsign':
299
- Input = safe_aggregate(Input, softsign(origin_input))
296
+ Input = safe_add(Input, softsign(origin_input))
300
297
  elif activation_list[i] == 'pwl':
301
- Input = safe_aggregate(Input, pwl(origin_input))
298
+ Input = safe_add(Input, pwl(origin_input))
302
299
  elif activation_list[i] == 'cubic':
303
- Input = safe_aggregate(Input, cubic(origin_input))
300
+ Input = safe_add(Input, cubic(origin_input))
304
301
  elif activation_list[i] == 'gaussian':
305
- Input = safe_aggregate(Input, gaussian(origin_input))
302
+ Input = safe_add(Input, gaussian(origin_input))
306
303
  elif activation_list[i] == 'sine':
307
- Input = safe_aggregate(Input, sine(origin_input))
304
+ Input = safe_add(Input, sine(origin_input))
308
305
  elif activation_list[i] == 'tanh_square':
309
- Input = safe_aggregate(Input, tanh_square(origin_input))
306
+ Input = safe_add(Input, tanh_square(origin_input))
310
307
  elif activation_list[i] == 'mod_sigmoid':
311
- Input = safe_aggregate(Input, mod_sigmoid(origin_input))
308
+ Input = safe_add(Input, mod_sigmoid(origin_input))
312
309
  elif activation_list[i] == 'linear':
313
- Input = safe_aggregate(Input, origin_input)
310
+ Input = safe_add(Input, origin_input)
314
311
  elif activation_list[i] == 'quartic':
315
- Input = safe_aggregate(Input, quartic(origin_input))
312
+ Input = safe_add(Input, quartic(origin_input))
316
313
  elif activation_list[i] == 'square_quartic':
317
- Input = safe_aggregate(Input, square_quartic(origin_input))
314
+ Input = safe_add(Input, square_quartic(origin_input))
318
315
  elif activation_list[i] == 'cubic_quadratic':
319
- Input = safe_aggregate(Input, cubic_quadratic(origin_input))
316
+ Input = safe_add(Input, cubic_quadratic(origin_input))
320
317
  elif activation_list[i] == 'exp_cubic':
321
- Input = safe_aggregate(Input, exp_cubic(origin_input))
318
+ Input = safe_add(Input, exp_cubic(origin_input))
322
319
  elif activation_list[i] == 'sine_square':
323
- Input = safe_aggregate(Input, sine_square(origin_input))
320
+ Input = safe_add(Input, sine_square(origin_input))
324
321
  elif activation_list[i] == 'logarithmic':
325
- Input = safe_aggregate(Input, logarithmic(origin_input))
322
+ Input = safe_add(Input, logarithmic(origin_input))
326
323
  elif activation_list[i] == 'scaled_cubic':
327
- Input = safe_aggregate(Input, scaled_cubic(origin_input, 1.0))
324
+ Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
328
325
  elif activation_list[i] == 'sine_offset':
329
- Input = safe_aggregate(Input, sine_offset(origin_input, 1.0))
326
+ Input = safe_add(Input, sine_offset(origin_input, 1.0))
330
327
  elif activation_list[i] == 'spiral':
331
- Input = safe_aggregate(Input, spiral_activation(origin_input))
328
+ Input = safe_add(Input, spiral_activation(origin_input))
332
329
  elif activation_list[i] == 'circular':
333
- Input = safe_aggregate(Input, circular_activation(origin_input))
330
+ Input = safe_add(Input, circular_activation(origin_input))
334
331
 
335
332
 
336
333
  except Exception as e:
@@ -18,9 +18,9 @@ def spiral_activation(x):
18
18
 
19
19
  spiral_x = r * cp.cos(theta + r)
20
20
  spiral_y = r * cp.sin(theta + r)
21
-
22
21
 
23
- spiral_output = cp.concatenate(([spiral_x[0]], spiral_y))
22
+
23
+ spiral_output = cp.concatenate([cp.array([spiral_x[0]]), spiral_y])
24
24
 
25
25
  return spiral_output
26
26
 
@@ -219,13 +219,12 @@ def sine_offset(x, beta=0.0):
219
219
 
220
220
 
221
221
 
222
- def safe_aggregate(current_sum, new_value):
222
+ def safe_add(current_sum, new_value):
223
223
  try:
224
224
  return current_sum + new_value
225
225
  except OverflowError:
226
226
  return cp.array(current_sum) + cp.array(new_value)
227
227
 
228
-
229
228
  def apply_activation(Input, activation_list):
230
229
  """
231
230
  Applies a sequence of activation functions to the input.
@@ -243,93 +242,93 @@ def apply_activation(Input, activation_list):
243
242
  for i in range(len(activation_list)):
244
243
  try:
245
244
  if activation_list[i] == 'sigmoid':
246
- Input = safe_aggregate(Input, Sigmoid(origin_input))
245
+ Input = safe_add(Input, Sigmoid(origin_input))
247
246
  elif activation_list[i] == 'swish':
248
- Input = safe_aggregate(Input, swish(origin_input))
247
+ Input = safe_add(Input, swish(origin_input))
249
248
  elif activation_list[i] == 'mod_circular':
250
- Input = safe_aggregate(Input, modular_circular_activation(origin_input))
249
+ Input = safe_add(Input, modular_circular_activation(origin_input))
251
250
  elif activation_list[i] == 'tanh_circular':
252
- Input = safe_aggregate(Input, tanh_circular_activation(origin_input))
251
+ Input = safe_add(Input, tanh_circular_activation(origin_input))
253
252
  elif activation_list[i] == 'leaky_relu':
254
- Input = safe_aggregate(Input, leaky_relu(origin_input))
253
+ Input = safe_add(Input, leaky_relu(origin_input))
255
254
  elif activation_list[i] == 'relu':
256
- Input = safe_aggregate(Input, Relu(origin_input))
255
+ Input = safe_add(Input, Relu(origin_input))
257
256
  elif activation_list[i] == 'softplus':
258
- Input = safe_aggregate(Input, softplus(origin_input))
257
+ Input = safe_add(Input, softplus(origin_input))
259
258
  elif activation_list[i] == 'elu':
260
- Input = safe_aggregate(Input, elu(origin_input))
259
+ Input = safe_add(Input, elu(origin_input))
261
260
  elif activation_list[i] == 'gelu':
262
- Input = safe_aggregate(Input, gelu(origin_input))
261
+ Input = safe_add(Input, gelu(origin_input))
263
262
  elif activation_list[i] == 'selu':
264
- Input = safe_aggregate(Input, selu(origin_input))
263
+ Input = safe_add(Input, selu(origin_input))
265
264
  elif activation_list[i] == 'tanh':
266
- Input = safe_aggregate(Input, tanh(origin_input))
265
+ Input = safe_add(Input, tanh(origin_input))
267
266
  elif activation_list[i] == 'sinakt':
268
- Input = safe_aggregate(Input, sinakt(origin_input))
267
+ Input = safe_add(Input, sinakt(origin_input))
269
268
  elif activation_list[i] == 'p_squared':
270
- Input = safe_aggregate(Input, p_squared(origin_input))
269
+ Input = safe_add(Input, p_squared(origin_input))
271
270
  elif activation_list[i] == 'sglu':
272
- Input = safe_aggregate(Input, sglu(origin_input, alpha=1.0))
271
+ Input = safe_add(Input, sglu(origin_input, alpha=1.0))
273
272
  elif activation_list[i] == 'dlrelu':
274
- Input = safe_aggregate(Input, dlrelu(origin_input))
273
+ Input = safe_add(Input, dlrelu(origin_input))
275
274
  elif activation_list[i] == 'exsig':
276
- Input = safe_aggregate(Input, exsig(origin_input))
275
+ Input = safe_add(Input, exsig(origin_input))
277
276
  elif activation_list[i] == 'sin_plus':
278
- Input = safe_aggregate(Input, sin_plus(origin_input))
277
+ Input = safe_add(Input, sin_plus(origin_input))
279
278
  elif activation_list[i] == 'acos':
280
- Input = safe_aggregate(Input, acos(origin_input, alpha=1.0, beta=0.0))
279
+ Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
281
280
  elif activation_list[i] == 'gla':
282
- Input = safe_aggregate(Input, gla(origin_input, alpha=1.0, mu=0.0))
281
+ Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
283
282
  elif activation_list[i] == 'srelu':
284
- Input = safe_aggregate(Input, srelu(origin_input))
283
+ Input = safe_add(Input, srelu(origin_input))
285
284
  elif activation_list[i] == 'qelu':
286
- Input = safe_aggregate(Input, qelu(origin_input))
285
+ Input = safe_add(Input, qelu(origin_input))
287
286
  elif activation_list[i] == 'isra':
288
- Input = safe_aggregate(Input, isra(origin_input))
287
+ Input = safe_add(Input, isra(origin_input))
289
288
  elif activation_list[i] == 'waveakt':
290
- Input = safe_aggregate(Input, waveakt(origin_input))
289
+ Input = safe_add(Input, waveakt(origin_input))
291
290
  elif activation_list[i] == 'arctan':
292
- Input = safe_aggregate(Input, arctan(origin_input))
291
+ Input = safe_add(Input, arctan(origin_input))
293
292
  elif activation_list[i] == 'bent_identity':
294
- Input = safe_aggregate(Input, bent_identity(origin_input))
293
+ Input = safe_add(Input, bent_identity(origin_input))
295
294
  elif activation_list[i] == 'sech':
296
- Input = safe_aggregate(Input, sech(origin_input))
295
+ Input = safe_add(Input, sech(origin_input))
297
296
  elif activation_list[i] == 'softsign':
298
- Input = safe_aggregate(Input, softsign(origin_input))
297
+ Input = safe_add(Input, softsign(origin_input))
299
298
  elif activation_list[i] == 'pwl':
300
- Input = safe_aggregate(Input, pwl(origin_input))
299
+ Input = safe_add(Input, pwl(origin_input))
301
300
  elif activation_list[i] == 'cubic':
302
- Input = safe_aggregate(Input, cubic(origin_input))
301
+ Input = safe_add(Input, cubic(origin_input))
303
302
  elif activation_list[i] == 'gaussian':
304
- Input = safe_aggregate(Input, gaussian(origin_input))
303
+ Input = safe_add(Input, gaussian(origin_input))
305
304
  elif activation_list[i] == 'sine':
306
- Input = safe_aggregate(Input, sine(origin_input))
305
+ Input = safe_add(Input, sine(origin_input))
307
306
  elif activation_list[i] == 'tanh_square':
308
- Input = safe_aggregate(Input, tanh_square(origin_input))
307
+ Input = safe_add(Input, tanh_square(origin_input))
309
308
  elif activation_list[i] == 'mod_sigmoid':
310
- Input = safe_aggregate(Input, mod_sigmoid(origin_input))
309
+ Input = safe_add(Input, mod_sigmoid(origin_input))
311
310
  elif activation_list[i] == 'linear':
312
- Input = safe_aggregate(Input, origin_input)
311
+ Input = safe_add(Input, origin_input)
313
312
  elif activation_list[i] == 'quartic':
314
- Input = safe_aggregate(Input, quartic(origin_input))
313
+ Input = safe_add(Input, quartic(origin_input))
315
314
  elif activation_list[i] == 'square_quartic':
316
- Input = safe_aggregate(Input, square_quartic(origin_input))
315
+ Input = safe_add(Input, square_quartic(origin_input))
317
316
  elif activation_list[i] == 'cubic_quadratic':
318
- Input = safe_aggregate(Input, cubic_quadratic(origin_input))
317
+ Input = safe_add(Input, cubic_quadratic(origin_input))
319
318
  elif activation_list[i] == 'exp_cubic':
320
- Input = safe_aggregate(Input, exp_cubic(origin_input))
319
+ Input = safe_add(Input, exp_cubic(origin_input))
321
320
  elif activation_list[i] == 'sine_square':
322
- Input = safe_aggregate(Input, sine_square(origin_input))
321
+ Input = safe_add(Input, sine_square(origin_input))
323
322
  elif activation_list[i] == 'logarithmic':
324
- Input = safe_aggregate(Input, logarithmic(origin_input))
323
+ Input = safe_add(Input, logarithmic(origin_input))
325
324
  elif activation_list[i] == 'scaled_cubic':
326
- Input = safe_aggregate(Input, scaled_cubic(origin_input, 1.0))
325
+ Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
327
326
  elif activation_list[i] == 'sine_offset':
328
- Input = safe_aggregate(Input, sine_offset(origin_input, 1.0))
327
+ Input = safe_add(Input, sine_offset(origin_input, 1.0))
329
328
  elif activation_list[i] == 'spiral':
330
- Input = safe_aggregate(Input, spiral_activation(origin_input))
329
+ Input = safe_add(Input, spiral_activation(origin_input))
331
330
  elif activation_list[i] == 'circular':
332
- Input = safe_aggregate(Input, circular_activation(origin_input))
331
+ Input = safe_add(Input, circular_activation(origin_input))
333
332
 
334
333
  except Exception as e:
335
334
  warnings.warn(f"Error in activation {activation_list[i]}: {str(e)}", RuntimeWarning)
@@ -76,7 +76,7 @@ def decode_one_hot(encoded_data):
76
76
  return decoded_labels
77
77
 
78
78
 
79
- def split(X, y, test_size, random_state, dtype=np.float32):
79
+ def split(X, y, test_size, random_state=42, dtype=np.float32):
80
80
  """
81
81
  Splits the given X (features) and y (labels) data into training and testing subsets.
82
82
 
@@ -84,7 +84,7 @@ def split(X, y, test_size, random_state, dtype=np.float32):
84
84
  X (numpy.ndarray): Features data.
85
85
  y (numpy.ndarray): Labels data.
86
86
  test_size (float or int): Proportion or number of samples for the test subset.
87
- random_state (int or None): Seed for random state.
87
+ random_state (int or None): Seed for random state. Default: 42.
88
88
  dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
89
89
 
90
90
  Returns:
@@ -81,7 +81,7 @@ def decode_one_hot(encoded_data):
81
81
  return decoded_labels
82
82
 
83
83
 
84
- def split(X, y, test_size, random_state, dtype=cp.float32):
84
+ def split(X, y, test_size, random_state=42, dtype=cp.float32, use_cpu=False):
85
85
  """
86
86
  Splits the given X (features) and y (labels) data into training and testing subsets.
87
87
 
@@ -89,9 +89,9 @@ def split(X, y, test_size, random_state, dtype=cp.float32):
89
89
  X (cupy.ndarray): Features data.
90
90
  y (cupy.ndarray): Labels data.
91
91
  test_size (float or int): Proportion or number of samples for the test subset.
92
- random_state (int or None): Seed for random state.
92
+ random_state (int or None): Seed for random state. Default: 42.
93
93
  dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
94
-
94
+ use_cpu (bool): If True, output will be same cpu's split function. Default: False.
95
95
  Returns:
96
96
  tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
97
97
  """
@@ -118,11 +118,17 @@ def split(X, y, test_size, random_state, dtype=cp.float32):
118
118
  else:
119
119
  raise ValueError("test_size should be float or int.")
120
120
 
121
- if random_state is not None:
122
- cp.random.seed(random_state)
121
+ if use_cpu:
122
+ indices = np.arange(num_samples)
123
+ np.random.seed(random_state)
124
+ np.random.shuffle(indices)
125
+ indices = cp.array(indices)
126
+ else:
127
+ if random_state is not None:
128
+ cp.random.seed(random_state)
123
129
 
124
- indices = cp.arange(num_samples)
125
- cp.random.shuffle(indices)
130
+ indices = cp.arange(num_samples)
131
+ cp.random.shuffle(indices)
126
132
 
127
133
  test_indices = indices[:test_size]
128
134
  train_indices = indices[test_size:]
@@ -133,16 +139,22 @@ def split(X, y, test_size, random_state, dtype=cp.float32):
133
139
  return x_train, x_test, y_train, y_test
134
140
 
135
141
 
136
- def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32):
142
+ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32, use_cpu=False):
137
143
  """
138
144
  Generates synthetic examples to balance classes to the specified number of examples per class.
139
145
 
140
146
  Arguments:
147
+
141
148
  x_train -- Input dataset (examples) - cupy array format
149
+
142
150
  y_train -- Class labels (one-hot encoded) - cupy array format
151
+
143
152
  target_samples_per_class -- Desired number of samples per class
153
+
144
154
  dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
145
155
 
156
+ use_cpu (bool): If True, output will be same cpu's manuel_balancer function. Default: False.
157
+
146
158
  Returns:
147
159
  x_balanced -- Balanced input dataset (cupy array format)
148
160
  y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
@@ -176,8 +188,13 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
176
188
  num_samples = len(class_indices)
177
189
 
178
190
  if num_samples > target_samples_per_class:
179
-
180
- selected_indices = cp.random.choice(class_indices, target_samples_per_class, replace=False)
191
+
192
+ if use_cpu:
193
+ selected_indices = np.random.choice(
194
+ class_indices, target_samples_per_class, replace=False)
195
+ else:
196
+ selected_indices = cp.random.choice(class_indices, target_samples_per_class, replace=False)
197
+
181
198
  x_balanced.append(x_train[selected_indices])
182
199
  y_balanced.append(y_train[selected_indices])
183
200
 
@@ -193,13 +210,19 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
193
210
  additional_labels = cp.zeros((samples_to_add, y_train.shape[1]))
194
211
 
195
212
  for i in range(samples_to_add):
213
+
214
+ if use_cpu:
215
+ random_indices = np.random.choice(class_indices.get(), 2, replace=False)
216
+ else:
217
+ random_indices = cp.random.choice(class_indices, 2, replace=False)
196
218
 
197
- random_indices = cp.random.choice(class_indices, 2, replace=False)
198
219
  sample1 = x_train[random_indices[0]]
199
220
  sample2 = x_train[random_indices[1]]
200
221
 
201
-
202
- synthetic_sample = sample1 + (sample2 - sample1) * cp.random.rand()
222
+ if use_cpu:
223
+ synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
224
+ else:
225
+ synthetic_sample = sample1 + (sample2 - sample1) * cp.random.rand()
203
226
 
204
227
  additional_samples[i] = synthetic_sample
205
228
  additional_labels[i] = y_train[class_indices[0]]
@@ -214,7 +237,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
214
237
  return x_balanced, y_balanced
215
238
 
216
239
 
217
- def auto_balancer(x_train, y_train, dtype=cp.float32):
240
+ def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
218
241
 
219
242
  """
220
243
  Function to balance the training data across different classes.
@@ -226,6 +249,7 @@ def auto_balancer(x_train, y_train, dtype=cp.float32):
226
249
 
227
250
  dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
228
251
 
252
+ use_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False.
229
253
  Returns:
230
254
  tuple: A tuple containing balanced input data and labels.
231
255
  """
@@ -248,44 +272,50 @@ def auto_balancer(x_train, y_train, dtype=cp.float32):
248
272
  classes = cp.arange(y_train.shape[1])
249
273
  class_count = len(classes)
250
274
 
251
- try:
252
- ClassIndices = {i: cp.where(cp.array(y_train)[:, i] == 1)[
253
- 0] for i in range(class_count)}
254
- classes = [len(ClassIndices[i]) for i in range(class_count)]
255
-
256
- if len(set(classes)) == 1:
257
- print(Fore.WHITE + "INFO: Data have already balanced. from: auto_balancer" + Style.RESET_ALL)
258
- return x_train, y_train
259
-
260
- MinCount = min(classes)
261
-
262
- BalancedIndices = []
263
- for i in tqdm(range(class_count),leave=False, ascii="▱▰",
264
- bar_format= bar_format, desc='Balancing Data',ncols=70):
265
- if len(ClassIndices[i]) > MinCount:
275
+
276
+ ClassIndices = {i: cp.where(cp.array(y_train)[:, i] == 1)[
277
+ 0] for i in range(class_count)}
278
+ classes = [len(ClassIndices[i]) for i in range(class_count)]
279
+
280
+ if len(set(classes)) == 1:
281
+ print(Fore.WHITE + "INFO: Data have already balanced. from: auto_balancer" + Style.RESET_ALL)
282
+ return x_train, y_train
283
+
284
+ MinCount = min(classes)
285
+
286
+ BalancedIndices = []
287
+ for i in tqdm(range(class_count),leave=False, ascii="▱▰",
288
+ bar_format= bar_format, desc='Balancing Data',ncols=70):
289
+ if len(ClassIndices[i]) > MinCount:
290
+ if use_cpu:
291
+ SelectedIndices = np.random.choice(
292
+ ClassIndices[i].get(), MinCount, replace=False)
293
+ else:
266
294
  SelectedIndices = cp.random.choice(
267
295
  ClassIndices[i], MinCount, replace=False)
268
- else:
269
- SelectedIndices = ClassIndices[i]
270
- BalancedIndices.extend(SelectedIndices)
271
-
272
- BalancedInputs = [x_train[idx] for idx in BalancedIndices]
273
- BalancedLabels = [y_train[idx] for idx in BalancedIndices]
296
+ else:
297
+ SelectedIndices = ClassIndices[i]
298
+ BalancedIndices.extend(SelectedIndices)
299
+
300
+ BalancedInputs = [x_train[idx] for idx in BalancedIndices]
301
+ BalancedLabels = [y_train[idx] for idx in BalancedIndices]
274
302
 
303
+ if use_cpu:
304
+ permutation = np.random.permutation(len(BalancedInputs))
305
+ else:
275
306
  permutation = cp.random.permutation(len(BalancedInputs))
276
- BalancedInputs = cp.array(BalancedInputs)[permutation]
277
- BalancedLabels = cp.array(BalancedLabels)[permutation]
278
307
 
279
- print(Fore.GREEN + "Data Succesfully Balanced from: " + str(len(x_train)
280
- ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
281
- except:
282
- print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters")
283
- sys.exit()
308
+ BalancedInputs = cp.array(BalancedInputs)[permutation]
309
+ BalancedLabels = cp.array(BalancedLabels)[permutation]
284
310
 
311
+ print(Fore.GREEN + "Data Succesfully Balanced from: " + str(len(x_train)
312
+ ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
313
+
314
+
285
315
  return BalancedInputs, BalancedLabels
286
316
 
287
317
 
288
- def synthetic_augmentation(x_train, y_train, dtype=cp.float32):
318
+ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
289
319
  """
290
320
  Generates synthetic examples to balance classes with fewer examples using CuPy.
291
321
  Arguments:
@@ -296,6 +326,8 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32):
296
326
 
297
327
  dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
298
328
 
329
+ use_cpu (bool): If True, output will be same cpu's synthetic_augmentation function. Default: False.
330
+
299
331
  Returns:
300
332
  x_train_balanced -- Balanced input dataset (cupy array format)
301
333
  y_train_balanced -- Balanced class labels (one-hot encoded, cupy array format)
@@ -336,13 +368,21 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32):
336
368
 
337
369
  if num_samples < max_class_count:
338
370
  while num_samples < max_class_count:
339
- random_indices = cp.random.choice(
371
+ if use_cpu:
372
+ random_indices = np.random.choice(
373
+ class_indices, 2, replace=False)
374
+ else:
375
+ random_indices = cp.random.choice(
340
376
  cp.array(class_indices), 2, replace=False)
341
377
  sample1 = x[random_indices[0]]
342
378
  sample2 = x[random_indices[1]]
343
379
 
344
- synthetic_sample = sample1 + \
345
- (sample2 - sample1) * cp.random.rand()
380
+ if use_cpu:
381
+ synthetic_sample = sample1 + \
382
+ (sample2 - sample1) * np.random.rand()
383
+ else:
384
+ synthetic_sample = sample1 + \
385
+ (sample2 - sample1) * cp.random.rand()
346
386
 
347
387
  x_balanced.append(synthetic_sample)
348
388
  y_balanced.append(y[class_indices[0]])
pyerualjetwork/help.py CHANGED
@@ -10,7 +10,7 @@ def activation_potentiation():
10
10
 
11
11
  def docs_and_examples():
12
12
 
13
- print('PLAN document: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PLAN\n')
14
- print('PLAN examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes\n')
15
- print('PLANEAT examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes/PLANEAT\n')
16
- print('PyerualJetwork document and examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork')
13
+ print('PLAN document: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_PLAN\n')
14
+ print('PLAN examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan/ExampleCodes\n')
15
+ print('PLANEAT examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan/ExampleCodes/PLANEAT\n')
16
+ print('Anaplan document and examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan')
@@ -1,7 +1,7 @@
1
1
  import cupy as cp
2
- from .data_operations_cuda import decode_one_hot
3
2
 
4
3
  def metrics(y_ts, test_preds, average='weighted'):
4
+ from .data_operations import decode_one_hot
5
5
  y_test_d = cp.array(decode_one_hot(y_ts))
6
6
  y_pred = cp.array(test_preds)
7
7
 
@@ -50,7 +50,6 @@ def metrics(y_ts, test_preds, average='weighted'):
50
50
  return precision_val.item(), recall_val.item(), f1_val.item()
51
51
 
52
52
 
53
-
54
53
  def roc_curve(y_true, y_score):
55
54
  """
56
55
  Compute Receiver Operating Characteristic (ROC) curve.
@@ -190,6 +190,7 @@ def load_model(model_name,
190
190
  Returns:
191
191
  lists: W(list[num]), activation_potentiation, DataFrame of the model
192
192
  """
193
+ np.set_printoptions(threshold=np.Infinity)
193
194
 
194
195
  try:
195
196
 
@@ -56,7 +56,7 @@ def save_model(model_name,
56
56
 
57
57
  class_count = W.shape[0]
58
58
 
59
- if test_acc != None:
59
+ if test_acc is not None:
60
60
  test_acc= float(test_acc)
61
61
 
62
62
  if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat' and weights_type != 'pkl':
@@ -91,6 +91,9 @@ def save_model(model_name,
91
91
 
92
92
  scaler_params.append(' ')
93
93
 
94
+ scaler_params[0] = scaler_params[0].get()
95
+ scaler_params[1] = scaler_params[1].get()
96
+
94
97
  data = {'MODEL NAME': model_name,
95
98
  'MODEL TYPE': model_type,
96
99
  'CLASS COUNT': class_count,
pyerualjetwork/plan.py CHANGED
@@ -125,15 +125,7 @@ def fit(
125
125
 
126
126
  elif val and (x_val is not None and y_val is not None):
127
127
  x_val = x_val.astype(dtype, copy=False)
128
- if len(y_val[0]) < 256:
129
- if y_val.dtype != np.uint8:
130
- y_val = np.array(y_val, copy=False).astype(np.uint8, copy=False)
131
- elif len(y_val[0]) <= 32767:
132
- if y_val.dtype != np.uint16:
133
- y_val = np.array(y_val, copy=False).astype(np.uint16, copy=False)
134
- else:
135
- if y_val.dtype != np.uint32:
136
- y_val = np.array(y_val, copy=False).astype(np.uint32, copy=False)
128
+ y_val = y_val.astype(dtype, copy=False)
137
129
 
138
130
  val_list = [] if val else None
139
131
  val_count = val_count or 10
@@ -231,7 +223,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
231
223
  tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
232
224
 
233
225
  """
234
- print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
226
+ print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
235
227
 
236
228
  activation_potentiation = all_activations()
237
229
 
@@ -120,22 +120,9 @@ def fit(
120
120
  if len(x_train) != len(y_train):
121
121
  raise ValueError("x_train and y_train must have the same length.")
122
122
 
123
- if val and (x_val is None and y_val is None):
123
+ if val and (x_val is None or y_val is None):
124
124
  x_val, y_val = x_train, y_train
125
125
 
126
- elif val and (x_val is not None and y_val is not None):
127
- x_val = cp.array(x_val, copy=False).astype(dtype, copy=False)
128
-
129
- if len(y_val[0]) < 256:
130
- if y_val.dtype != cp.uint8:
131
- y_val = cp.array(y_val, copy=False).astype(cp.uint8, copy=False)
132
- elif len(y_val[0]) <= 32767:
133
- if y_val.dtype != cp.uint16:
134
- y_val = cp.array(y_val, copy=False).astype(cp.uint16, copy=False)
135
- else:
136
- if y_val.dtype != cp.uint32:
137
- y_val = cp.array(y_val, copy=False).astype(cp.uint32, copy=False)
138
-
139
126
  val_list = [] if val else None
140
127
  val_count = val_count or 10
141
128
  # Defining weights
@@ -146,7 +133,7 @@ def fit(
146
133
 
147
134
  # Training process
148
135
  for index, inp in enumerate(x_train):
149
- inp = cp.array(inp).ravel()
136
+ inp = cp.array(inp, copy=False).ravel()
150
137
  y_decoded = decode_one_hot(y_train)
151
138
  # Weight updates
152
139
  STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
@@ -232,7 +219,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
232
219
  tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
233
220
 
234
221
  """
235
- print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
222
+ print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
236
223
 
237
224
  activation_potentiation = all_activations()
238
225
 
@@ -297,9 +284,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
297
284
 
298
285
  # Initialize progress bar
299
286
  if batch_size == 1:
300
- ncols = 90
287
+ ncols = 100
301
288
  else:
302
- ncols = 103
289
+ ncols = 140
303
290
  progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
304
291
 
305
292
  # Initialize variables
pyerualjetwork/planeat.py CHANGED
@@ -15,10 +15,10 @@ import random
15
15
  from tqdm import tqdm
16
16
 
17
17
  ### LIBRARY IMPORTS ###
18
- from plan import feed_forward
19
- from data_operations import normalization
20
- from ui import loading_bars
21
- from activation_functions import apply_activation, all_activations
18
+ from .plan import feed_forward
19
+ from .data_operations import normalization
20
+ from .ui import loading_bars
21
+ from .activation_functions import apply_activation, all_activations
22
22
 
23
23
  def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
24
24
  """
@@ -16,10 +16,10 @@ import random
16
16
  from tqdm import tqdm
17
17
 
18
18
  ### LIBRARY IMPORTS ###
19
- from plan_cuda import feed_forward
20
- from data_operations_cuda import normalization
21
- from ui import loading_bars
22
- from activation_functions_cuda import apply_activation, all_activations
19
+ from .plan_cuda import feed_forward
20
+ from .data_operations_cuda import normalization
21
+ from .ui import loading_bars
22
+ from .activation_functions_cuda import apply_activation, all_activations
23
23
 
24
24
  def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32):
25
25
  """
@@ -525,8 +525,8 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=Non
525
525
 
526
526
  def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
527
527
 
528
- from metrics import pca
529
- from data_operations import decode_one_hot
528
+ from .metrics_cuda import pca
529
+ from .data_operations_cuda import decode_one_hot
530
530
 
531
531
  if x.shape[1] > 2:
532
532
 
@@ -587,7 +587,7 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=Fa
587
587
 
588
588
  title_info = f'{j+1}. Neuron'
589
589
 
590
- art5 = ax1[j].imshow(mat, interpolation='sinc', cmap='viridis')
590
+ art5 = ax1[j].imshow(mat.get(), interpolation='sinc', cmap='viridis')
591
591
 
592
592
  ax1[j].set_aspect('equal')
593
593
  ax1[j].set_xticks([])
@@ -604,7 +604,7 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=Fa
604
604
 
605
605
  def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
606
606
  """Initializes the visualization setup based on the parameters."""
607
- from data_operations import find_closest_factors
607
+ from .data_operations import find_closest_factors
608
608
  visualization_objects = {}
609
609
 
610
610
  if show_training:
@@ -680,7 +680,7 @@ def display_visualization_for_fit(fig, artist_list, interval):
680
680
 
681
681
  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
682
682
  """Initialize all visualization components"""
683
- from data_operations import find_closest_factors
683
+ from .data_operations import find_closest_factors
684
684
  viz_objects = {}
685
685
 
686
686
  if show_history:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyerualjetwork
3
- Version: 4.1.0
3
+ Version: 4.1.2
4
4
  Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
5
5
  Author: Hasan Can Beydili
6
6
  Author-email: tchasancan@gmail.com
@@ -0,0 +1,23 @@
1
+ pyerualjetwork/__init__.py,sha256=5oH9sQ9xOXUWYxBJt2h4ErHKFw63vREeOnKTdhSQGhk,2542
2
+ pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
3
+ pyerualjetwork/activation_functions_cuda.py,sha256=7U69VfwAIE8STUng2zEwPPQES9NgnkAXsDtVh-EzaZE,11803
4
+ pyerualjetwork/data_operations.py,sha256=2julEScuHsL_ueeJ-JE3hiqw3wibZQW_L2bwwdoXTN0,16552
5
+ pyerualjetwork/data_operations_cuda.py,sha256=uVGcLwhhePkZt2BnO9KrsIMq29CW5L_9ucyxN8Wnevw,18711
6
+ pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
7
+ pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
8
+ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
9
+ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
10
+ pyerualjetwork/metrics_cuda.py,sha256=Hz4PCeE5GcVUllZdsgXXdIw-UNqUVpqNxMIlPBNTSKY,5069
11
+ pyerualjetwork/model_operations.py,sha256=eWYiYlXYZzsRgVfF-4CFvjCHaZOGB2378evre8yCzYk,13084
12
+ pyerualjetwork/model_operations_cuda.py,sha256=Hryk2Qi6BwHY9K9G_muDxHW9ILK8dIW6lmwZfioKqYM,13246
13
+ pyerualjetwork/plan.py,sha256=1PDMyBnCsQgyks4esnPobcUNBHbex54JG2oFEV_Q_9g,34336
14
+ pyerualjetwork/plan_cuda.py,sha256=bpI4HVMexL5WiGU30Nj1mzp8f9sOyxuDw7Ka7LqQR7g,33958
15
+ pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
16
+ pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
17
+ pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
18
+ pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
19
+ pyerualjetwork/visualizations_cuda.py,sha256=hH2FMjbsImAxTLIAUS2pfGSufigV-SbgpVMVrj4lYOE,26733
20
+ pyerualjetwork-4.1.2.dist-info/METADATA,sha256=vSlo45lfiRI3HHq3z4aUrX1LZXG83t14nLM-w8jqbSU,6357
21
+ pyerualjetwork-4.1.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
22
+ pyerualjetwork-4.1.2.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
23
+ pyerualjetwork-4.1.2.dist-info/RECORD,,
@@ -1,23 +0,0 @@
1
- pyerualjetwork/__init__.py,sha256=buQzAGP2zwBt10ji65TzcupjWYX70rSdlkPzRhmnlDk,2542
2
- pyerualjetwork/activation_functions.py,sha256=UeuuagJWcSoFfmwikDU7O8ph--oySnWDJNqKbEh4SlE,12043
3
- pyerualjetwork/activation_functions_cuda.py,sha256=5F49gKkiRngo0hAaS1KfarxQ7wEyub13WAX_apxf8j8,12069
4
- pyerualjetwork/data_operations.py,sha256=rnOYLLK3YnRdWpEsEQABU0RE950lQQI7971eBLBpqOQ,16536
5
- pyerualjetwork/data_operations_cuda.py,sha256=8jooTsRCC-pEYvtw8c6CsfUUnztDy8DI8-yLf9aX27A,17108
6
- pyerualjetwork/help.py,sha256=pZs7hIhgFkovGLle97d9Qu9m5zKhMh7-OAIphIoSxBg,830
7
- pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
8
- pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
9
- pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
10
- pyerualjetwork/metrics_cuda.py,sha256=TCwn5Z_4jQjqPCURX_xtcz9cjsYVzlahgKDA-qCgpU4,5072
11
- pyerualjetwork/model_operations.py,sha256=k_53BJladPm9fBWdlVpS6Uf5IQzpNlJWLH746DXGq_M,13036
12
- pyerualjetwork/model_operations_cuda.py,sha256=Guo0lFaaLiAXwKmnOi8Fz_bL_p38qR46CIhGOg_V1Sw,13138
13
- pyerualjetwork/plan.py,sha256=eHMYN-uzpzdwFnSsSuREOkG4vJdvoHZnRzJUQlcpBrc,34756
14
- pyerualjetwork/plan_cuda.py,sha256=y2TWyUUeyT7r04qxcRbCc42XfakPlMNG1BHSPK0afP4,34551
15
- pyerualjetwork/planeat.py,sha256=8cwWboJtXgFTKq6nFl1T9McbLDmBquKUr12y168PmcM,39513
16
- pyerualjetwork/planeat_cuda.py,sha256=boN-HFwm_D9cT1z0eAR8zgkiD_XOg-J2T2jNFvZweG4,39570
17
- pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
18
- pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
19
- pyerualjetwork/visualizations_cuda.py,sha256=dA0u85ZIyKqjtoSJ6p3EbEpJs4V4vS5W5ftR6eif8yg,26713
20
- pyerualjetwork-4.1.0.dist-info/METADATA,sha256=StjT-bsNr5C_PsyNauqHbCF4ZaL0JLNGbERMnmGF4lQ,6357
21
- pyerualjetwork-4.1.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
22
- pyerualjetwork-4.1.0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
23
- pyerualjetwork-4.1.0.dist-info/RECORD,,