pyerualjetwork 4.6.7__py3-none-any.whl → 4.6.8b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "4.6.7"
+__version__ = "4.6.8b0"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/activation_functions.py CHANGED
@@ -7,7 +7,7 @@ import warnings
 
 def all_activations():
 
-    activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
+    activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
 
     return activations_list
 
@@ -279,7 +279,7 @@ def apply_activation(Input, activation_list):
 
         activation_outputs = np.array([activation_functions[act](origin_input) for act in valid_activations])
 
-        return Input + np.sum(activation_outputs, axis=0)
+        return np.sum(activation_outputs, axis=0)
 
     except Exception as e:
         warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
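The functional change in this hunk is easy to miss: 4.6.7 added the summed activation outputs back onto the raw input (a residual-style term), while 4.6.8b0 returns the sum alone. A minimal NumPy sketch of the two expressions (the tanh/relu stand-ins and sample values are illustrative, not the library's internals):

    import numpy as np

    x = np.array([0.5, -1.0, 2.0], dtype=np.float32)

    # Two activations applied to the same original input, then summed,
    # mirroring activation_outputs in apply_activation.
    activation_outputs = np.array([np.tanh(x), np.maximum(x, 0)])  # 'tanh', 'relu'

    old_result = x + np.sum(activation_outputs, axis=0)  # 4.6.7: input + activations
    new_result = np.sum(activation_outputs, axis=0)      # 4.6.8b0: activations only

    print(old_result)  # approx [ 1.462 -1.762  4.964]
    print(new_result)  # approx [ 0.962 -0.762  2.964]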
pyerualjetwork/activation_functions_cuda.py CHANGED
@@ -6,7 +6,7 @@ import warnings
 
 def all_activations():
 
-    activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
+    activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
 
     return activations_list
 
@@ -202,9 +202,6 @@ def square_quartic(x):
 def cubic_quadratic(x):
     return x**3 * (x**2)
 
-def exp_cubic(x):
-    return cp.exp(x**3)
-
 def sine_square(x):
     return cp.sin(x)**2
 
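No rationale ships with the diff, but dropping exp_cubic looks like a numerical-stability cleanup: exp(x**3) overflows 32-bit floats for very modest inputs. A quick check, rewritten with NumPy for illustration (the deleted function used CuPy's cp.exp):

    import numpy as np

    def exp_cubic(x):
        # Re-creation of the removed activation, for demonstration only.
        return np.exp(x ** 3)

    x = np.float32(5.0)
    print(exp_cubic(x))  # inf: exp(125) ~ 1.9e54 exceeds float32's max (~3.4e38)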
@@ -266,7 +263,6 @@ def apply_activation(Input, activation_list):
         'quartic': quartic,
         'square_quartic': square_quartic,
         'cubic_quadratic': cubic_quadratic,
-        'exp_cubic': exp_cubic,
         'sine_square': sine_square,
         'logarithmic': logarithmic,
         'scaled_cubic': lambda x: scaled_cubic(x, 1.0),
@@ -282,7 +278,7 @@ def apply_activation(Input, activation_list):
 
         activation_outputs = cp.array([activation_functions[act](origin_input) for act in valid_activations])
 
-        return Input + cp.sum(activation_outputs, axis=0)
+        return cp.sum(activation_outputs, axis=0)
 
     except Exception as e:
         warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
pyerualjetwork/data_operations.py CHANGED
@@ -379,22 +379,24 @@ dtype=np.float32):
 
 
 def non_neg_normalization(
-    Input,
+    Input,
     dtype=np.float32
     ):
     """
     Normalizes the input data [0-1] range.
-
     Args:
         Input (numpy): Input data to be normalized.
-
-        dtype (numpy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
-
+        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
     Returns:
         (numpy) Scaled input data after normalization.
     """
+    Input = Input.astype(dtype, copy=False)
+    MaxAbs = np.max(np.abs(Input))
 
-    MaxAbs = np.max(np.abs(Input.astype(dtype, copy=False)))
+    if np.all(Input == Input.flat[0]):
+        randomization = np.random.random(Input.shape).astype(dtype)
+        return randomization
+
     return (Input + MaxAbs) / (2 * MaxAbs)
 
 
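The new branch handles degenerate input: when every element is identical, MaxAbs is the shared magnitude (zero for an all-zero array), so (Input + MaxAbs) / (2 * MaxAbs) either divides by zero or collapses to a constant; 4.6.8b0 substitutes uniform random values instead. A sketch, assuming the module path shown in the RECORD listing at the end of this diff:

    import numpy as np
    from pyerualjetwork.data_operations import non_neg_normalization

    constant = np.zeros((2, 3), dtype=np.float32)
    print(non_neg_normalization(constant))  # 4.6.7: nan (0/0); 4.6.8b0: random values in [0, 1)

    mixed = np.array([-2.0, 0.0, 2.0], dtype=np.float32)
    print(non_neg_normalization(mixed))     # [0.  0.5 1. ] -- unchanged for varied input

The CuPy variant below receives the identical guard; its signature default also flips from cp.float32 to np.float32, which should be behaviorally neutral since CuPy reuses NumPy's scalar dtypes.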
pyerualjetwork/data_operations_cuda.py CHANGED
@@ -420,22 +420,24 @@ def normalization(
 
 
 def non_neg_normalization(
-    Input,
-    dtype=cp.float32
+    Input,
+    dtype=np.float32
     ):
     """
     Normalizes the input data [0-1] range.
-
     Args:
         Input (cupy): Input data to be normalized.
-
         dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
-
     Returns:
         (cupy) Scaled input data after normalization.
     """
+    Input = Input.astype(dtype, copy=False)
+    MaxAbs = cp.max(cp.abs(Input))
 
-    MaxAbs = cp.max(cp.abs(Input.astype(dtype, copy=False)))
+    if cp.all(Input == Input.flat[0]):
+        randomization = cp.random.random(Input.shape).astype(dtype)
+        return randomization
+
     return (Input + MaxAbs) / (2 * MaxAbs)
 
 
pyerualjetwork/model_operations.py CHANGED
@@ -55,6 +55,13 @@ def save_model(model_name,
     if model_type != 'PLAN' and model_type != 'MLP':
         raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
+    activation = activation_potentiation.copy()
+
     if test_acc != None:
         test_acc= float(test_acc)
 
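The inserted block canonicalizes activation_potentiation up front: a bare string becomes a one-element list, and inside a sequence every non-list entry is wrapped in its own list (MLP models carry one activation list per layer). save_model then works on a copy named activation, so the padding appends and inserts further down no longer mutate the caller's argument, and the padded copy is what lands in the 'ACTIVATION POTENTIATION' column. The same guard is repeated verbatim in predict_model_ssd, predict_model_ram, and evaluate in both the NumPy and CuPy backends below. A standalone sketch (canonicalize is a hypothetical name for this unnamed block):

    def canonicalize(activation_potentiation):
        # Same logic as the block inserted into save_model.
        if isinstance(activation_potentiation, str):
            return [activation_potentiation]
        return [item if isinstance(item, list) else [item]
                for item in activation_potentiation]

    print(canonicalize('relu'))                        # ['relu']
    print(canonicalize(['relu', 'tanh']))              # [['relu'], ['tanh']]
    print(canonicalize([['relu', 'spiral'], 'tanh']))  # [['relu', 'spiral'], ['tanh']]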
@@ -82,6 +89,7 @@ def save_model(model_name,
             sys.exit()
 
     elif model_type == 'MLP':
+
         class_count = W[-1].shape[0]
 
         NeuronCount.append(np.shape(W[0])[1])
@@ -95,33 +103,34 @@ def save_model(model_name,
             print(Fore.RED + "ERROR: Weight matrices have a problem from: save_model" + Style.RESET_ALL)
             sys.exit()
 
+
         SynapseCount.append(' ')
 
-    activation_potentiation.append('')
-    activation_potentiation.insert(0, '')
+    activation.append('')
+    activation.insert(0, '')
 
-    if isinstance(activation_potentiation, str):
-        activation_potentiation = [activation_potentiation]
-        activation_potentiation.append('')
+    if len(activation) == 1 and model_type == 'PLAN':
+        activation = [activation]
+        activation.append('')
 
-    if len(activation_potentiation) > len(NeuronCount):
-        for i in range(len(activation_potentiation) - len(NeuronCount)):
+    if len(activation) > len(NeuronCount):
+        for i in range(len(activation) - len(NeuronCount)):
             NeuronCount.append('')
-
-    if len(activation_potentiation) > len(SynapseCount):
-        for i in range(len(activation_potentiation) - len(SynapseCount)):
+
+    if len(activation) > len(SynapseCount):
+        for i in range(len(activation) - len(SynapseCount)):
             SynapseCount.append('')
 
 
     if scaler_params != None:
 
-        if len(scaler_params) > len(activation_potentiation):
+        if len(scaler_params) > len(activation):
 
-            activation_potentiation += ['']
+            activation += ['']
 
-        elif len(activation_potentiation) > len(scaler_params):
+        elif len(activation) > len(scaler_params):
 
-            for i in range(len(activation_potentiation) - len(scaler_params)):
+            for i in range(len(activation) - len(scaler_params)):
 
                 scaler_params.append(' ')
 
@@ -136,7 +145,7 @@ def save_model(model_name,
         'WEIGHTS FORMAT': weights_format,
         'MODEL PATH': model_path,
         'STANDARD SCALER': scaler_params,
-        'ACTIVATION POTENTIATION': activation_potentiation
+        'ACTIVATION POTENTIATION': activation
     }
 
     df = pd.DataFrame(data)
@@ -305,6 +314,11 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
     W = model[get_weights()]
     model_type = model[get_model_type()]
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
     Input = standard_scaler(None, Input, scaler_params)
 
     if model_type == 'MLP':
@@ -328,7 +342,7 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
         sys.exit()
 
 
-def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float32):
+def reverse_predict_model_ssd(output, model_name, model_path=''):
 
     """
     reverse prediction function from storage
@@ -340,8 +354,6 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float3
 
     model_path (str): Path of the model. Default: ''
 
-    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
-
     Returns:
     ndarray: Input from the model.
     """
@@ -386,6 +398,11 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
 
     Input = standard_scaler(None, Input, scaler_params)
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
     if is_mlp:
 
         layer = Input
@@ -406,7 +423,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
         print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + Style.RESET_ALL)
         sys.exit()
 
-def reverse_predict_model_ram(output, W, dtype=np.float32):
+def reverse_predict_model_ram(output, W):
 
     """
    reverse prediction function from memory
@@ -417,8 +434,6 @@ def reverse_predict_model_ram(output, W, dtype=np.float32):
 
     W (list of ndarrays): Weights of the model.
 
-    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
-
     Returns:
     ndarray: Input from the model.
     """
pyerualjetwork/model_operations_cuda.py CHANGED
@@ -59,6 +59,13 @@ def save_model(model_name,
     if model_type != 'PLAN' and model_type != 'MLP':
         raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
+    activation = activation_potentiation.copy()
+
     if test_acc is not None:
         test_acc= float(test_acc)
 
@@ -101,30 +108,30 @@ def save_model(model_name,
 
         SynapseCount.append(' ')
 
-    activation_potentiation.append('')
-    activation_potentiation.insert(0, '')
+    activation.append('')
+    activation.insert(0, '')
 
-    if isinstance(activation_potentiation, str):
-        activation_potentiation = [activation_potentiation]
-        activation_potentiation.append('')
+    if len(activation) == 1 and model_type == 'PLAN':
+        activation = [activation]
+        activation.append('')
 
-    if len(activation_potentiation) > len(NeuronCount):
-        for i in range(len(activation_potentiation) - len(NeuronCount)):
+    if len(activation) > len(NeuronCount):
+        for i in range(len(activation) - len(NeuronCount)):
             NeuronCount.append('')
 
-    if len(activation_potentiation) > len(SynapseCount):
-        for i in range(len(activation_potentiation) - len(SynapseCount)):
+    if len(activation) > len(SynapseCount):
+        for i in range(len(activation) - len(SynapseCount)):
             SynapseCount.append('')
 
     if scaler_params != None:
 
-        if len(scaler_params) > len(activation_potentiation):
+        if len(scaler_params) > len(activation):
 
-            activation_potentiation += ['']
+            activation += ['']
 
-        elif len(activation_potentiation) > len(scaler_params):
+        elif len(activation) > len(scaler_params):
 
-            for i in range(len(activation_potentiation) - len(scaler_params)):
+            for i in range(len(activation) - len(scaler_params)):
 
                 scaler_params.append(' ')
 
@@ -142,7 +149,7 @@ def save_model(model_name,
         'WEIGHTS FORMAT': weights_format,
         'MODEL PATH': model_path,
         'STANDARD SCALER': scaler_params,
-        'ACTIVATION POTENTIATION': activation_potentiation
+        'ACTIVATION POTENTIATION': activation
     }
 
     df = pd.DataFrame(data)
@@ -315,6 +322,11 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     W = model[get_weights()]
     model_type = model[get_model_type()]
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
     Input = standard_scaler(None, Input, scaler_params)
 
     if model_type == 'MLP':
@@ -395,7 +407,12 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
     from .activation_functions_cuda import apply_activation
 
     try:
-
+
+        if isinstance(activation_potentiation, str):
+            activation_potentiation = [activation_potentiation]
+        else:
+            activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
         Input = standard_scaler(None, Input, scaler_params)
 
         if is_mlp:
pyerualjetwork/plan.py CHANGED
@@ -404,6 +404,11 @@ def evaluate(
 
     if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
     x_test = apply_activation(x_test, activation_potentiation)
 
     result = x_test @ W.T
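The context lines sketch the PLAN evaluation path: normalize, apply the (now canonicalized) activations, then score with a single matrix product. A toy NumPy rendition of those context lines (shapes and the tanh stand-in are made up; apply_activation is simplified away):

    import numpy as np

    x_test = np.random.rand(4, 3).astype(np.float32)  # 4 samples, 3 features
    W = np.random.rand(2, 3).astype(np.float32)       # 2 classes

    x_act = np.tanh(x_test)  # stands in for apply_activation(x_test, activation_potentiation)
    result = x_act @ W.T     # one score per class per sample, as in evaluate
    print(result.shape)      # (4, 2)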
pyerualjetwork/plan_cuda.py CHANGED
@@ -413,6 +413,11 @@ def evaluate(
 
     if auto_normalization: x_test = normalization(x_test, dtype=x_test.dtype)
 
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+    else:
+        activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
     x_test = apply_activation(x_test, activation_potentiation)
 
     result = x_test @ W.T
pyerualjetwork-4.6.8b0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.6.7
+Version: 4.6.8b0
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
pyerualjetwork-4.6.8b0.dist-info/RECORD ADDED
@@ -0,0 +1,25 @@
+pyerualjetwork/__init__.py,sha256=u-vA_SDH_vmQI2fEUzbCFlIJG84uHxcir0gEXc-UvgQ,1281
+pyerualjetwork/activation_functions.py,sha256=0fNOHXd490HC6gadKwb0AuBjw34dWq3GRZkg5iWO27c,7621
+pyerualjetwork/activation_functions_cuda.py,sha256=X5-dtYBkGp35kyL21eLR7mbYUdttxElu-WqXmwmkV9E,7672
+pyerualjetwork/data_operations.py,sha256=XKYG9-mLa3qKAXUjejuD7V8aJKjpl5PdQwKzPFjpKgs,15437
+pyerualjetwork/data_operations_cuda.py,sha256=zqiHXDRtC8qDlVlN6lLoZn9uQgkm40aKFfFjWjurCxQ,17538
+pyerualjetwork/fitness_functions.py,sha256=urRdeMvUhNgWxD4ZGHCRdQlIf9cTWYMvF3_aVBojRqY,1235
+pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
+pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/memory_operations.py,sha256=0yCOHcgiNyF4ccMcRlL1Q9F_byG4nzjhmkbpXE_yU6E,13401
+pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
+pyerualjetwork/model_operations.py,sha256=SYMXYNFFLz2YUvmp9lSKXd2L1vCwhyL_AUjL3UYCkZw,15134
+pyerualjetwork/model_operations_cuda.py,sha256=GE_71JLTItAFXz8iW60QeO4XKJ8fGeHgxETmHtW9drc,16204
+pyerualjetwork/plan.py,sha256=cjVblo8TxTHX-GZPvgQvJZ34nOmzxSvtCrQi9K-Mhog,23268
+pyerualjetwork/plan_cuda.py,sha256=ZEU_b_EoA-zPk7Gn94L_XBZz1v4mn8DOUsjTNV6fp8Q,24230
+pyerualjetwork/planeat.py,sha256=prbkUIrD37Y_b7MmTuGg4rGHXfqHIjLFMbs7TnnEy9E,44645
+pyerualjetwork/planeat_cuda.py,sha256=i6WDHkUEAMK7IHNBilM29xyYWq2qvPNpF9idcAkC1EU,44650
+pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
+pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
+pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
+pyerualjetwork-4.6.8b0.dist-info/METADATA,sha256=nB7S7mpl1snTt4T8QzTSs8ShpPEpny_at6JXeqW5h2c,7507
+pyerualjetwork-4.6.8b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.6.8b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.6.8b0.dist-info/RECORD,,
pyerualjetwork-4.6.7.dist-info/RECORD DELETED
@@ -1,25 +0,0 @@
-pyerualjetwork/__init__.py,sha256=qAWAFwiPtMCcyatR3z8bCv1Q33nVY7aqHH7j712tyoc,1279
-pyerualjetwork/activation_functions.py,sha256=Ms0AGBqkJuCA42ht64MSQnO54Td_1eDGquedpoBDVbc,7642
-pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
-pyerualjetwork/data_operations.py,sha256=Y_RdxkjLEszFgeo4VDWIX1keF2syP-88KesLXA5sRyY,15280
-pyerualjetwork/data_operations_cuda.py,sha256=9tyD3Bbv5__stuUampgh3_GbMhb_kmTTJmZi7BJsvuA,17381
-pyerualjetwork/fitness_functions.py,sha256=urRdeMvUhNgWxD4ZGHCRdQlIf9cTWYMvF3_aVBojRqY,1235
-pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
-pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
-pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/memory_operations.py,sha256=0yCOHcgiNyF4ccMcRlL1Q9F_byG4nzjhmkbpXE_yU6E,13401
-pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
-pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
-pyerualjetwork/model_operations.py,sha256=KaJS2IM5Uk527DqN2ENdKX99yJElymfTmE3226j2HwM,14941
-pyerualjetwork/model_operations_cuda.py,sha256=0MyokoskDTnMKugu4kU8PP4F5tLN6RYNUx_5SWvqoyg,15562
-pyerualjetwork/plan.py,sha256=UyIvPmvHCHwczlc9KHolE4y6CPEeBfhnRN5yznSbnoM,23028
-pyerualjetwork/plan_cuda.py,sha256=iteqgv7x9Z2Pj4vGOZs6HXS3r0bNaF_smr7ZXaOdRnw,23990
-pyerualjetwork/planeat.py,sha256=prbkUIrD37Y_b7MmTuGg4rGHXfqHIjLFMbs7TnnEy9E,44645
-pyerualjetwork/planeat_cuda.py,sha256=i6WDHkUEAMK7IHNBilM29xyYWq2qvPNpF9idcAkC1EU,44650
-pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
-pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
-pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
-pyerualjetwork-4.6.7.dist-info/METADATA,sha256=191yqow3K5p68q4nmtWhIx95BDSOSj-xlwcT7w6Tmms,7505
-pyerualjetwork-4.6.7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-4.6.7.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-4.6.7.dist-info/RECORD,,