pyerualjetwork-4.4-py3-none-any.whl → pyerualjetwork-4.5.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
@@ -1,4 +1,4 @@
- __version__ = "4.4"
+ __version__ = "4.5.1"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
@@ -17,9 +17,6 @@ def encode_one_hot(y_train, y_test=None, summary=False):
      tuple: One-hot encoded y_train and (if given) y_test.
  """
  from .memory_operations import optimize_labels
-
- y_train = optimize_labels(y_train, one_hot_encoded=False, cuda=False)
- y_test = optimize_labels(y_test, one_hot_encoded=False, cuda=False)
 
  classes = np.unique(y_train)
  class_count = len(classes)
@@ -34,11 +31,14 @@ def encode_one_hot(y_train, y_test=None, summary=False):
  y_train_encoded = np.zeros((y_train.shape[0], class_count), dtype=y_train.dtype)
  for i, label in enumerate(y_train):
      y_train_encoded[i, class_to_index[label]] = 1
+ y_train_encoded = optimize_labels(y_train_encoded, one_hot_encoded=True, cuda=False)
 
  if y_test is not None:
      y_test_encoded = np.zeros((y_test.shape[0], class_count), dtype=y_test.dtype)
      for i, label in enumerate(y_test):
          y_test_encoded[i, class_to_index[label]] = 1
+     y_test_encoded = optimize_labels(y_test_encoded, one_hot_encoded=True, cuda=False)
+
      return y_train_encoded, y_test_encoded
 
  return y_train_encoded
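
Note on this hunk: labels are now dtype-compacted after one-hot encoding instead of before. A self-contained sketch of that order of operations in plain NumPy (downcast_one_hot is a hypothetical stand-in for the library's optimize_labels, which its docstring below describes as picking uint8/uint16/uint32):

    import numpy as np

    def downcast_one_hot(y_encoded):
        # Hypothetical stand-in for memory_operations.optimize_labels(one_hot_encoded=True):
        # a one-hot matrix only ever holds 0/1, so uint8 is always wide enough here.
        return y_encoded.astype(np.uint8, copy=False)

    y_train = np.array(['cat', 'dog', 'cat', 'bird'])
    classes = np.unique(y_train)                          # ['bird' 'cat' 'dog']
    class_to_index = {c: i for i, c in enumerate(classes)}

    y_train_encoded = np.zeros((y_train.shape[0], len(classes)))
    for i, label in enumerate(y_train):
        y_train_encoded[i, class_to_index[label]] = 1

    y_train_encoded = downcast_one_hot(y_train_encoded)   # encode first, then compact
    print(y_train_encoded.dtype)                          # uint8
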
@@ -77,11 +77,7 @@ def split(X, y, test_size, random_state=42, dtype=np.float32):
  Returns:
      tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
  """
- from .memory_operations import transfer_to_cpu, optimize_labels
-
- X = transfer_to_cpu(X, dtype=dtype)
- y = optimize_labels(y, one_hot_encoded=False, cuda=False)
-
+
  num_samples = X.shape[0]
 
  if isinstance(test_size, float):
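
For reference, the call shape of split is unchanged; only the implicit CPU transfer and label optimization were removed. A usage sketch against the documented signature (the pyerualjetwork.data_operations import path is assumed from the file layout):

    import numpy as np
    from pyerualjetwork.data_operations import split  # import path assumed

    X = np.random.rand(100, 4).astype(np.float32)
    y = np.random.randint(0, 3, size=100)

    # 20% of the 100 samples go to the test split, seeded for reproducibility.
    x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
    print(x_train.shape, x_test.shape)  # (80, 4) (20, 4)
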
@@ -114,15 +110,21 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=np.float32
  """
  Generates synthetic examples to balance classes to the specified number of examples per class.
 
- Arguments:
- x_train -- Input dataset (examples) - NumPy array format
- y_train -- Class labels (one-hot encoded) - NumPy array format
- target_samples_per_class -- Desired number of samples per class
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ Args:
+
+ x_train: numpy array format
+
+ y_train (one-hot encoded): numpy array format
+
+ target_samples_per_class (int): Desired number of samples per class
+
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
+
+ shuffle_in_cpu (bool): If True, output will be same cpu's manuel_balancer function. Default: False. (Use this for direct comparison of cpu training.)
 
  Returns:
- x_balanced -- Balanced input dataset (NumPy array format)
- y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+ x_balanced -- Balanced input dataset (numpy array format)
+ y_balanced -- Balanced class labels (one-hot encoded, numpy array format)
  """
  from .ui import loading_bars
  from .memory_operations import transfer_to_cpu
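
A usage sketch for manuel_balancer's documented signature (import path assumed; note that the new shuffle_in_cpu doc entry appears to be carried over from the CUDA variant, since this NumPy signature does not take that parameter):

    import numpy as np
    from pyerualjetwork.data_operations import manuel_balancer  # import path assumed

    x_train = np.random.rand(120, 8).astype(np.float32)
    y_train = np.eye(3)[np.random.randint(0, 3, size=120)]      # one-hot labels, 3 classes

    # Over- or under-sample every class to exactly 50 examples.
    x_balanced, y_balanced = manuel_balancer(x_train, y_train, target_samples_per_class=50)
    print(x_balanced.shape)  # (150, 8)
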
@@ -185,15 +187,19 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=np.float32
  def auto_balancer(x_train, y_train, dtype=np.float32):
 
  """
- Function to balance the training data across different classes.
+ Function to balance (to min) the training data across different classes.
 
- Arguments:
- x_train (list): Input data for training.
- y_train (list): Labels corresponding to the input data. one-hot encoded.
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ Args:
+ x_train (list): Input data for training.
+
+ y_train (list): Labels corresponding to the input data. (one-hot encoded)
+
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
+
+ shuffle_in_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False. (Use this for direct comparison of cpu training.)
 
- Returns:
- tuple: A tuple containing balanced input data and labels.
+ Returns:
+ tuple: A tuple containing balanced input data and labels.
  """
  from .ui import loading_bars
  from .memory_operations import transfer_to_cpu
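
The clarified summary says this balances every class down to the minimum class count. A plain-NumPy illustration of that semantics only (the library version also shuffles and reports progress):

    import numpy as np

    y = np.eye(2)[np.array([0, 0, 0, 0, 1, 1])]   # 4 examples of class 0, 2 of class 1
    x = np.arange(12, dtype=np.float32).reshape(6, 2)

    counts = y.sum(axis=0).astype(int)            # [4, 2]
    n_min = counts.min()                          # balance "to min" -> 2 per class

    keep = np.concatenate([np.where(y[:, c] == 1)[0][:n_min] for c in range(y.shape[1])])
    x_balanced, y_balanced = x[keep], y[keep]
    print(y_balanced.sum(axis=0))                 # [2. 2.]
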
@@ -248,16 +254,18 @@ def auto_balancer(x_train, y_train, dtype=np.float32):
 
  def synthetic_augmentation(x, y, dtype=np.float32):
  """
- Generates synthetic examples to balance classes with fewer examples.
+ Generates synthetic examples to balance classes with fewer examples using numpy.
+ Args:
+ x_train: numpy array format
 
- Arguments:
- x -- Input dataset (examples) - array format
- y -- Class labels (one-hot encoded) - array format
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ y_train: numpy array format
+
+ dtype (numpy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
+
+ shuffle_in_cpu (bool): If True, output will be same cpu's synthetic_augmentation function. Default: False. (Use this for direct comparison of cpu training.)
 
  Returns:
- x_balanced -- Balanced input dataset (array format)
- y_balanced -- Balanced class labels (one-hot encoded, array format)
+ x_train_balanced, y_train_balanced (numpy array format)
  """
  from .ui import loading_bars
  from .memory_operations import transfer_to_cpu
@@ -313,19 +321,16 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=np.floa
  Standardizes training and test datasets. x_test may be None.
 
  Args:
- x_train: numpy.ndarray
+ x_train (numpy.ndarray):
 
- x_test: numpy.ndarray (optional)
+ x_test (numpy.ndarray): (optional)
 
- scaler_params (optional for using model)
+ scaler_params (tuple): (optional for using model)
 
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
 
  Returns:
- list:
- Scaler parameters: mean and std
- tuple
- Standardized training and test datasets
+ Scaler parameters, Standardized training (and if test given) datasets. (tuple)
  """
  if x_train is not None and scaler_params is None and x_test is not None:
      x_train = x_train.astype(dtype, copy=False)
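
The practical pattern behind the rewritten Returns line: fit once on training data, then reuse the returned parameters at inference. A sketch assuming the (scaler_params, x_train, x_test) return order implied by the docstring:

    import numpy as np
    from pyerualjetwork.data_operations import standard_scaler  # import path assumed

    x_train = np.random.rand(100, 5).astype(np.float32)
    x_test = np.random.rand(20, 5).astype(np.float32)

    # Fit on the training data and transform both splits in one call.
    scaler_params, x_train_s, x_test_s = standard_scaler(x_train, x_test)

    # Reuse the stored (mean, std) on new data at inference time;
    # the return value of this params-only mode is assumed to be the scaled array.
    x_new = np.random.rand(3, 5).astype(np.float32)
    x_new_s = standard_scaler(x_test=x_new, scaler_params=scaler_params)
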
@@ -373,6 +378,26 @@ dtype=np.float32):
      return (Input / MaxAbs)
 
 
+ def non_neg_normalization(
+     Input,
+     dtype=np.float32
+ ):
+     """
+     Normalizes the input data [0-1] range.
+
+     Args:
+         Input (numpy): Input data to be normalized.
+
+         dtype (numpy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
+
+     Returns:
+         (numpy) Scaled input data after normalization.
+     """
+
+     MaxAbs = np.max(np.abs(Input.astype(dtype, copy=False)))
+     return (Input + MaxAbs) / (2 * MaxAbs)
+
+
  def find_closest_factors(a):
 
      root = int(math.sqrt(a))
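
The new non_neg_normalization maps [-MaxAbs, +MaxAbs] linearly onto [0, 1], in contrast to normalization above, which divides by MaxAbs and lands in [-1, 1]. A quick check of the arithmetic:

    import numpy as np

    Input = np.array([-2.0, 0.0, 1.0, 2.0], dtype=np.float32)

    MaxAbs = np.max(np.abs(Input))            # 2.0
    out = (Input + MaxAbs) / (2 * MaxAbs)     # shift into [0, 2*MaxAbs], then rescale

    print(out)  # [0.   0.5  0.75 1.  ]  -- -MaxAbs -> 0, 0 -> 0.5, +MaxAbs -> 1
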
@@ -14,14 +14,11 @@ def encode_one_hot(y_train, y_test=None, summary=False):
  summary (bool): If True, prints the class-to-index mapping. Default: False
 
  Returns:
- tuple: One-hot encoded y_train and (if given) y_test.
+ tuple: One-hot encoded y_train and (if given: y_test).
  """
 
  from .memory_operations import optimize_labels, transfer_to_cpu
 
- y_train = optimize_labels(y_train, one_hot_encoded=False, cuda=True)
- y_test = optimize_labels(y_test, one_hot_encoded=False, cuda=True)
-
  y_train = transfer_to_cpu(y_train,dtype=y_train.dtype)
  y_test = transfer_to_cpu(y_test,dtype=y_test.dtype)
 
@@ -38,14 +35,17 @@ def encode_one_hot(y_train, y_test=None, summary=False):
  y_train_encoded = np.zeros((y_train.shape[0], class_count), dtype=y_train.dtype)
  for i, label in enumerate(y_train):
      y_train_encoded[i, class_to_index[label]] = 1
+ y_train_encoded = optimize_labels(y_train_encoded, one_hot_encoded=True, cuda=True)
 
  if y_test is not None:
      y_test_encoded = np.zeros((y_test.shape[0], class_count), dtype=y_test.dtype)
      for i, label in enumerate(y_test):
          y_test_encoded[i, class_to_index[label]] = 1
-     return cp.array(y_train_encoded, dtype=y_train.dtype), cp.array(y_test_encoded, dtype=y_test.dtype)
+     y_test_encoded = optimize_labels(y_test_encoded, one_hot_encoded=True, cuda=True)
+
+     return y_train_encoded, y_test_encoded
 
- return cp.array(y_train_encoded, dtype=y_train.dtype)
+ return y_train_encoded
 
 
  def decode_one_hot(encoded_data):
@@ -77,7 +77,7 @@ def split(X, y, test_size, random_state=42, dtype=cp.float32, shuffle_in_cpu=Fal
 
  random_state (int or None): Seed for random state. Default: 42.
 
- dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+ dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16.
 
  shuffle_in_cpu (bool): If True, output will be same cpu's split function. Default: False. (Use this for direct comparison of cpu training.)
  Returns:
@@ -126,21 +126,21 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
  """
  Generates synthetic examples to balance classes to the specified number of examples per class.
 
- Arguments:
+ Args:
 
- x_train -- Input dataset (examples) - cupy array format
+ x_train: cupy array format
 
- y_train -- Class labels (one-hot encoded) - cupy array format
+ y_train (one-hot encoded): cupy array format
 
- target_samples_per_class -- Desired number of samples per class
+ target_samples_per_class (int): Desired number of samples per class
 
- dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+ dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
 
- shuffle_in_cpu (bool): If True, output will be same cpu's manuel_balancer function. Default: False. (Use this for direct comparison of cpu training.)
+ shuffle_in_cpu (bool): If True, output will be same cpu's manuel_balancer function. Default: False. (Use this for direct comparison of cpu training.)
 
  Returns:
- x_balanced -- Balanced input dataset (cupy array format)
- y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
+ x_balanced -- Balanced input dataset (cupy array format)
+ y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
  """
  from .ui import loading_bars
  from .memory_operations import transfer_to_gpu
@@ -216,19 +216,21 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
  def auto_balancer(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
 
  """
- Function to balance the training data across different classes.
+ Function to balance (to min) the training data across different classes.
 
- Arguments:
- x_train (list): Input data for training.
-
- y_train (list): Labels corresponding to the input data. (one-hot encoded)
-
- dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+ Args:
+ x_train (list): Input data for training.
+
+ y_train (list): Labels corresponding to the input data. (one-hot encoded)
+
+ dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
+
+ shuffle_in_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False. (Use this for direct comparison of cpu training.)
 
- shuffle_in_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False. (Use this for direct comparison of cpu training.)
  Returns:
- tuple: A tuple containing balanced input data and labels.
+ tuple: A tuple containing balanced input data and labels.
  """
+
  from .ui import loading_bars
  from .memory_operations import transfer_to_gpu
 
@@ -287,19 +289,17 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
  def synthetic_augmentation(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
  """
  Generates synthetic examples to balance classes with fewer examples using CuPy.
- Arguments:
-
- x_train -- Input dataset (examples) - cupy array format
-
- y_train -- Class labels (one-hot encoded) - cupy array format
-
- dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
- shuffle_in_cpu (bool): If True, output will be same cpu's synthetic_augmentation function. Default: False. (Use this for direct comparison of cpu training.)
+ Args:
+ x_train: cupy array format
+
+ y_train: cupy array format
+
+ dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
+
+ shuffle_in_cpu (bool): If True, output will be same cpu's synthetic_augmentation function. Default: False. (Use this for direct comparison of cpu training.)
 
  Returns:
- x_train_balanced -- Balanced input dataset (cupy array format)
- y_train_balanced -- Balanced class labels (one-hot encoded, cupy array format)
+ x_train_balanced, y_train_balanced (cupy array format)
  """
  from .ui import loading_bars
  from .memory_operations import transfer_to_gpu
@@ -360,19 +360,16 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=cp.floa
  Standardizes training and test datasets. x_test may be None.
 
  Args:
- x_train: cupy.ndarray
+ x_train (cupy.ndarray):
 
- x_test: cupy.ndarray (optional)
+ x_test (cupy.ndarray): (optional)
 
- scaler_params (optional for using model)
+ scaler_params (tuple): (optional for using model)
 
- dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+ dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
 
  Returns:
- list:
- Scaler parameters: mean and std
- tuple
- Standardized training and test datasets
+ Scaler parameters, Standardized training (and if test given) datasets. (tuple)
  """
  if x_train is not None and scaler_params is None and x_test is not None:
      x_train = x_train.astype(dtype, copy=False)
@@ -412,7 +409,7 @@ def normalization(
  Args:
      Input (num): Input data to be normalized.
 
- dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
+ dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
 
  Returns:
      (num) Scaled input data after normalization.
@@ -421,6 +418,27 @@ def normalization(
  MaxAbs = cp.max(cp.abs(Input.astype(dtype, copy=False)))
  return (Input / MaxAbs)
 
+
+ def non_neg_normalization(
+     Input,
+     dtype=cp.float32
+ ):
+     """
+     Normalizes the input data [0-1] range.
+
+     Args:
+         Input (cupy): Input data to be normalized.
+
+         dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16.
+
+     Returns:
+         (cupy) Scaled input data after normalization.
+     """
+
+     MaxAbs = cp.max(cp.abs(Input.astype(dtype, copy=False)))
+     return (Input + MaxAbs) / (2 * MaxAbs)
+
+
  def find_closest_factors(a):
 
      root = int(math.sqrt(a))
@@ -1,16 +1,16 @@
+ import numpy as np
+
  def wals(acc, loss, acc_impact, loss_impact):
  """
  The WALS(weighted accuracy-loss score) function calculates a weighted sum of accuracy and loss based on their respective impacts.
 
  :param acc: The `acc` parameter represents the accuracy of a model or system
- :param loss: The `loss` parameter in the `wals` function represents the amount of loss incurred. It
- is used in the calculation to determine the overall impact based on the accuracy and loss impacts
- provided
- :param acc_impact: The `acc_impact` parameter represents the impact of accuracy on the overall score
- calculation in the `wals` function. It is a multiplier that determines how much the accuracy
- contributes to the final result
- :param loss_impact: The `loss_impact` parameter in the `wals` function represents the weight of loss value when calculating the overall impact. It is used to determine how
- much the loss affects the final result compared to the accuracy impact
+ :param loss: The `loss` parameter in the `wals` function represents the amount of loss incurred. It is used in the calculation to determine the overall impact based on the accuracy and loss impacts provided
+ :param acc_impact: The `acc_impact` parameter represents the impact of accuracy on the overall score calculation in the `wals` function. It is a multiplier that determines how much the accuracy contributes to the final result
+ :param loss_impact: The `loss_impact` parameter in the `wals` function represents the weight of loss value when calculating the overall impact. It is used to determine how much the loss affects the final result compared to the accuracy impact
  :return: the weighted sum of accuracy and loss based on their respective impacts.
  """
- return (acc * acc_impact) + (-loss * loss_impact)
+ loss += np.finfo(float).eps
+ loss_impact += np.finfo(float).eps
+
+ return (acc * acc_impact) + ((loss_impact / loss) * loss_impact)
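
This hunk changes behavior, not just formatting: the score used to subtract a linear loss term and now adds a reciprocal-loss term, with machine epsilon guarding the division and a zero loss_impact. A standalone before/after comparison:

    import numpy as np

    def wals_old(acc, loss, acc_impact, loss_impact):
        return (acc * acc_impact) + (-loss * loss_impact)

    def wals_new(acc, loss, acc_impact, loss_impact):
        loss += np.finfo(float).eps          # guards the division below
        loss_impact += np.finfo(float).eps
        return (acc * acc_impact) + ((loss_impact / loss) * loss_impact)

    # acc=0.9, loss=0.2, impacts 0.7 / 0.3
    print(wals_old(0.9, 0.2, 0.7, 0.3))  # 0.57  (loss lowers the score linearly)
    print(wals_new(0.9, 0.2, 0.7, 0.3))  # 1.08  (low loss now raises the score)
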
@@ -34,11 +34,11 @@ def transfer_to_cpu(x, dtype=np.float32):
34
34
  The `transfer_to_cpu` function converts data to a specified data type on the CPU, handling memory constraints
35
35
  by batching the conversion process and ensuring complete GPU memory cleanup.
36
36
 
37
- param x: Input data to transfer to CPU (CuPy array)
37
+ :param x: Input data to transfer to CPU (CuPy array)
38
38
 
39
- param dtype: Target NumPy dtype for the output array (default: np.float32)
39
+ :param dtype: Target NumPy dtype for the output array (default: np.float32)
40
40
 
41
- return: NumPy array with the specified dtype
41
+ :return: NumPy array with the specified dtype
42
42
  """
43
43
  from .ui import loading_bars, initialize_loading_bar
44
44
  try:
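
A usage sketch against the corrected :param docs (requires CuPy with a CUDA device; import path assumed):

    import cupy as cp
    import numpy as np
    from pyerualjetwork.memory_operations import transfer_to_cpu  # import path assumed

    x_gpu = cp.random.rand(10_000, 64)

    # Batched GPU -> CPU conversion; result is a NumPy array of the requested dtype.
    x_cpu = transfer_to_cpu(x_gpu, dtype=np.float32)
    print(type(x_cpu), x_cpu.dtype)  # <class 'numpy.ndarray'> float32
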
@@ -125,11 +125,8 @@ def get_optimal_batch_size_for_gpu(x, data_size_bytes):
  The function calculates the optimal batch size for a GPU based on available memory and data size.
 
  :param x: A list or array containing the data elements that will be processed on the GPU
- :param data_size_bytes: The `data_size_bytes` parameter represents the total size of the data in
- bytes that you want to process on the GPU. This could be the size of a single batch of data or the
- total size of the dataset, depending on how you are structuring your computations
- :return: the optimal batch size that can be used for processing the given data on the GPU, based on
- the available free memory on the GPU and the size of the data elements.
+ :param data_size_bytes: The `data_size_bytes` parameter represents the total size of the data in bytes that you want to process on the GPU. This could be the size of a single batch of data or the total size of the dataset, depending on how you are structuring your computations
+ :return: the optimal batch size that can be used for processing the given data on the GPU, based on the available free memory on the GPU and the size of the data elements.
  """
  free_memory = cp.get_default_memory_pool().free_bytes()
  device_memory = cp.cuda.runtime.memGetInfo()[0]
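
Only the body's first two lines are visible in this diff, so the full formula is not shown. As an illustration of the general idea only (not the library's exact computation), a batch size can be derived from usable memory and the per-element footprint:

    import cupy as cp

    def sketch_optimal_batch_size(x, data_size_bytes):
        # Usable memory: free bytes held by CuPy's pool plus free device memory.
        free_pool = cp.get_default_memory_pool().free_bytes()
        free_device = cp.cuda.runtime.memGetInfo()[0]
        usable = free_pool + free_device

        bytes_per_element = max(data_size_bytes // max(len(x), 1), 1)
        # Keep a safety margin so intermediate buffers still fit.
        return max(int(0.5 * usable) // bytes_per_element, 1)
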
@@ -246,21 +243,10 @@ def optimize_labels(y, one_hot_encoded=True, cuda=False):
  The function `optimize_labels` optimizes the data type of labels based on their length and encoding
  format.
 
- :param y: The `optimize_labels` function is designed to optimize the data type of the input labels
- `y` based on certain conditions. The function checks if the labels are in one-hot encoded format or
- not, and then based on the length of the labels and the specified data types (`uint8`, `uint
- :param one_hot_encoded: The `one_hot_encoded` parameter in the `optimize_labels` function indicates
- whether the labels are in one-hot encoded format or not. If `one_hot_encoded` is set to `True`, it
- means that the labels are in one-hot encoded format, and the function will check the length of the,
- defaults to True (optional)
- :param cuda: The `cuda` parameter in the `optimize_labels` function is a boolean flag that indicates
- whether to use CUDA for computations. If `cuda` is set to `True`, the function will use the CuPy
- library for array operations, which can leverage GPU acceleration. If `cuda` is `False, defaults to
- False (optional)
- :return: The function `optimize_labels` returns the input array `y` after optimizing its data type
- based on the specified conditions. If `one_hot_encoded` is True, it checks the length of the
- elements in `y` and converts the data type to uint8, uint16, or uint32 accordingly. If
- `one_hot_encoded` is False, it checks the length of `y` itself and
+ :param y: The `optimize_labels` function is designed to optimize the data type of the input labels `y` based on certain conditions. The function checks if the labels are in one-hot encoded format or not, and then based on the length of the labels and the specified data types (`uint8`, `uint
+ :param one_hot_encoded: The `one_hot_encoded` parameter in the `optimize_labels` function indicates whether the labels are in one-hot encoded format or not. If `one_hot_encoded` is set to `True`, it means that the labels are in one-hot encoded format, and the function will check the length of the, defaults to True (optional)
+ :param cuda: The `cuda` parameter in the `optimize_labels` function is a boolean flag that indicates whether to use CUDA for computations. If `cuda` is set to `True`, the function will use the CuPy library for array operations, which can leverage GPU acceleration. If `cuda` is `False, defaults to False (optional)
+ :return: The function `optimize_labels` returns the input array `y` after optimizing its data type based on the specified conditions. If `one_hot_encoded` is True, it checks the length of the elements in `y` and converts the data type to uint8, uint16, or uint32 accordingly. If `one_hot_encoded` is False, it checks the length of `y` itself and
  """
 
  if cuda: array_type = cp
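
The reflowed :return doc pins down the contract: choose the narrowest unsigned integer dtype that still fits the labels. A minimal CPU-only sketch of that rule, assuming the standard uint8/uint16 capacity thresholds:

    import numpy as np

    def optimize_labels_sketch(y):
        # Narrowest unsigned dtype that can represent the largest class index.
        n = int(np.max(y))
        if n <= np.iinfo(np.uint8).max:     # 255
            return y.astype(np.uint8)
        if n <= np.iinfo(np.uint16).max:    # 65535
            return y.astype(np.uint16)
        return y.astype(np.uint32)

    labels = np.arange(300)                      # class indices 0..299
    print(optimize_labels_sketch(labels).dtype)  # uint16, since 299 > 255
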
@@ -23,33 +23,21 @@ def save_model(model_name,
 
  """
  Function to save a potentiation learning artificial neural network model.
-
- Arguments:
-
- model_name (str): Name of the model.
-
- model_type (str): Type of the model. default: 'PLAN'
-
- test_acc (float): Test accuracy of the model. default: None
-
- weights_type (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
-
- WeightFormat (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-
- model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
-
- scaler_params (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
-
- W: Weights of the model.
-
- activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
-
- show_architecture (bool): It draws model architecture. True or False. Default: False
-
- show_info (bool): Prints model details into console. default: True
+ Args:
+ model_name: (str): Name of the model.
+ W: Weights of the model.
+ scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
+ model_type: (str): Type of the model. default: 'PLAN'
+ test_acc: (float): Test accuracy of the model. default: None
+ model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
+ activation_potentiation: (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
+ weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+ weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
+ show_architecture: (bool): It draws model architecture. True or False. Default: False
+ show_info: (bool): Prints model details into console. default: True
 
  Returns:
- No return.
+ No return.
  """
 
  from .visualizations import draw_model_architecture
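
The reordered Args block now roughly follows call order. A usage sketch built only from the documented parameter names and defaults (import path assumed; W stands in for trained weights):

    import numpy as np
    from pyerualjetwork.model_operations import save_model  # import path assumed

    W = np.random.rand(10, 4)  # placeholder weights for illustration

    save_model(model_name='my_plan_model',
               W=W,
               scaler_params=None,            # None when standard_scaler was not used
               model_type='PLAN',
               test_acc=0.93,
               model_path='',                 # current directory
               activation_potentiation=['linear'],
               weights_type='npy',
               weights_format='raw',
               show_architecture=False,
               show_info=True)
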
@@ -181,14 +169,14 @@ def load_model(model_name,
  """
  Function to load a potentiation learning model.
 
- Arguments:
+ Args:
 
- model_name (str): Name of the model.
+ model_name (str): Name of the model.
 
- model_path (str): Path where the model is saved.
+ model_path (str): Path where the model is saved.
 
  Returns:
- lists: W(list[num]), activation_potentiation, DataFrame of the model
+ lists: W(list[num]), activation_potentiation, DataFrame of the model
  """
 
  try:
@@ -244,18 +232,18 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
  Function to make a prediction using a potentiation learning artificial neural network (PLAN).
  from storage
 
- Arguments:
+ Args:
 
- Input (list or ndarray): Input data for the model (single vector or single matrix).
+ Input (list or ndarray): Input data for the model (single vector or single matrix).
 
- model_name (str): Name of the model.
+ model_name (str): Name of the model.
 
- model_path (str): Path of the model. Default: ''
+ model_path (str): Path of the model. Default: ''
 
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
  Returns:
- ndarray: Output from the model.
+ ndarray: Output from the model.
  """
 
  from .activation_functions import apply_activation
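
A usage sketch against the signature in the hunk header (import path assumed):

    import numpy as np
    from pyerualjetwork.model_operations import predict_model_ssd  # import path assumed

    sample = np.random.rand(4).astype(np.float32)  # one input vector

    # Loads the saved model from disk and runs a forward pass.
    output = predict_model_ssd(sample, model_name='my_plan_model', model_path='')
    print(output)  # raw output layer; argmax gives the predicted class index
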
@@ -285,18 +273,18 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float3
 
  """
  reverse prediction function from storage
- Arguments:
+ Args:
 
- output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
+ output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
 
- model_name (str): Name of the model.
+ model_name (str): Name of the model.
 
- model_path (str): Path of the model. Default: ''
+ model_path (str): Path of the model. Default: ''
 
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
-
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+
  Returns:
- ndarray: Input from the model.
+ ndarray: Input from the model.
  """
 
  model = load_model(model_name, model_path)
@@ -318,20 +306,20 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
  Function to make a prediction using a potentiation learning artificial neural network (PLAN).
  from memory.
 
- Arguments:
+ Args:
 
- Input (list or ndarray): Input data for the model (single vector or single matrix).
+ Input (list or ndarray): Input data for the model (single vector or single matrix).
 
- W (list of ndarrays): Weights of the model.
+ W (list of ndarrays): Weights of the model.
 
- scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
-
- activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+ scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+
+ activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
  Returns:
- ndarray: Output from the model.
+ ndarray: Output from the model.
  """
 
  from .data_operations import standard_scaler
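
predict_model_ram runs the same forward pass from weights already in memory, pairing naturally with load_model above. A sketch using only the documented names (import path assumed):

    import numpy as np
    from pyerualjetwork.model_operations import predict_model_ram  # import path assumed

    W = np.random.rand(3, 4)                        # placeholder weights for illustration
    sample = np.random.rand(4).astype(np.float32)

    output = predict_model_ram(sample, W,
                               scaler_params=None,               # no standard scaling
                               activation_potentiation=['linear'])
    predicted_class = int(np.argmax(output))
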
@@ -358,16 +346,16 @@ def reverse_predict_model_ram(output, W, dtype=np.float32):
  """
  reverse prediction function from memory
 
- Arguments:
+ Args:
 
- output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
+ output (list or ndarray): output layer for the model (single probability vector, output layer of trained model).
 
- W (list of ndarrays): Weights of the model.
+ W (list of ndarrays): Weights of the model.
 
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
  Returns:
- ndarray: Input from the model.
+ ndarray: Input from the model.
  """
 
  try: