pyerualjetwork 3.3.4__py3-none-any.whl → 4.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +7 -16
- pyerualjetwork/activation_functions.py +120 -146
- pyerualjetwork/activation_functions_cuda.py +341 -0
- pyerualjetwork/data_operations.py +79 -77
- pyerualjetwork/data_operations_cuda.py +461 -0
- pyerualjetwork/help.py +4 -4
- pyerualjetwork/loss_functions_cuda.py +21 -0
- pyerualjetwork/memory_operations.py +298 -0
- pyerualjetwork/metrics_cuda.py +163 -0
- pyerualjetwork/model_operations.py +81 -23
- pyerualjetwork/model_operations_cuda.py +421 -0
- pyerualjetwork/plan.py +229 -210
- pyerualjetwork/plan_cuda.py +696 -0
- pyerualjetwork/planeat.py +60 -43
- pyerualjetwork/planeat_cuda.py +744 -0
- pyerualjetwork/visualizations.py +153 -129
- pyerualjetwork/visualizations_cuda.py +826 -0
- {pyerualjetwork-3.3.4.dist-info → pyerualjetwork-4.1.9.dist-info}/METADATA +45 -21
- pyerualjetwork-4.1.9.dist-info/RECORD +24 -0
- pyerualjetwork-3.3.4.dist-info/RECORD +0 -15
- {pyerualjetwork-3.3.4.dist-info → pyerualjetwork-4.1.9.dist-info}/WHEEL +0 -0
- {pyerualjetwork-3.3.4.dist-info → pyerualjetwork-4.1.9.dist-info}/top_level.txt +0 -0
--- a/pyerualjetwork/data_operations.py
+++ b/pyerualjetwork/data_operations.py
@@ -10,12 +10,17 @@ def encode_one_hot(y_train, y_test=None, summary=False):
 
     Args:
         y_train (numpy.ndarray): Train label data.
-        y_test (numpy.ndarray): Test label data. (optional).
-        summary (bool): If True, prints the class-to-index mapping. Default: False
-
+        y_test (numpy.ndarray): Test label data one-hot encoded. (optional).
+        summary (bool, optional): If True, prints the class-to-index mapping. Default: False
+
     Returns:
-        tuple: One-hot encoded y_train
+        tuple: One-hot encoded y_train and (if given) y_test.
     """
+    from .memory_operations import optimize_labels
+
+    y_train = optimize_labels(y_train, one_hot_encoded=False, cuda=False)
+    y_test = optimize_labels(y_test, one_hot_encoded=False, cuda=False)
+
     classes = np.unique(y_train)
     class_count = len(classes)
 
@@ -26,12 +31,12 @@ def encode_one_hot(y_train, y_test=None, summary=False):
         for cls, idx in class_to_index.items():
             print(f" {idx}: {cls}")
 
-    y_train_encoded = np.zeros((y_train.shape[0], class_count))
+    y_train_encoded = np.zeros((y_train.shape[0], class_count), dtype=y_train.dtype)
     for i, label in enumerate(y_train):
         y_train_encoded[i, class_to_index[label]] = 1
 
     if y_test is not None:
-        y_test_encoded = np.zeros((y_test.shape[0], class_count))
+        y_test_encoded = np.zeros((y_test.shape[0], class_count), dtype=y_test.dtype)
         for i, label in enumerate(y_test):
            y_test_encoded[i, class_to_index[label]] = 1
        return y_train_encoded, y_test_encoded
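Taken together, the two hunks above make `encode_one_hot` dtype-aware. For readers who want the pattern outside the library, here is a minimal standalone sketch following the same steps (the name `one_hot_sketch` is illustrative, not the package's API):

```python
import numpy as np

def one_hot_sketch(labels):
    """Minimal one-hot encoder following the same steps as the hunk."""
    classes = np.unique(labels)                    # sorted unique class values
    class_to_index = {cls: i for i, cls in enumerate(classes)}

    # Preallocate with the labels' dtype, mirroring the 4.1.9 change.
    encoded = np.zeros((labels.shape[0], len(classes)), dtype=labels.dtype)
    for i, label in enumerate(labels):
        encoded[i, class_to_index[label]] = 1
    return encoded

print(one_hot_sketch(np.array([0, 2, 1, 2, 0])))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]
#  [0 0 1]
#  [1 0 0]]
```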
@@ -55,7 +60,7 @@ def decode_one_hot(encoded_data):
     return decoded_labels
 
 
-def split(X, y, test_size, random_state):
+def split(X, y, test_size, random_state=42, dtype=np.float32):
     """
     Splits the given X (features) and y (labels) data into training and testing subsets.
 
@@ -63,11 +68,16 @@ def split(X, y, test_size, random_state):
         X (numpy.ndarray): Features data.
         y (numpy.ndarray): Labels data.
         test_size (float or int): Proportion or number of samples for the test subset.
-        random_state (int or None): Seed for random state.
+        random_state (int or None): Seed for random state. Default: 42.
+        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
         tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
     """
+    from .memory_operations import transfer_to_cpu, optimize_labels
+
+    X = transfer_to_cpu(X, dtype=dtype)
+    y = optimize_labels(y, one_hot_encoded=False, cuda=False)
 
     num_samples = X.shape[0]
 
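The recurring `dtype` parameter added throughout 4.x is a memory knob. A quick numpy-only illustration of why the docstring singles out fp32/fp64/fp16 (the claim about which devices suit which precision is the package author's; the sizes below are just arithmetic):

```python
import numpy as np

X = np.random.rand(10_000, 256)            # float64 by default
print(X.nbytes / 1e6)                      # 20.48 MB

X32 = X.astype(np.float32, copy=False)     # half the footprint
X16 = X.astype(np.float16, copy=False)     # a quarter, at a real precision cost
print(X32.nbytes / 1e6, X16.nbytes / 1e6)  # 10.24 5.12

print(np.float16(0.1234567), "vs", 0.1234567)  # float16 visibly rounds
```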
@@ -92,10 +102,12 @@ def split(X, y, test_size, random_state):
     x_train, x_test = X[train_indices], X[test_indices]
     y_train, y_test = y[train_indices], y[test_indices]
 
+    del X, y
+
     return x_train, x_test, y_train, y_test
 
 
-def manuel_balancer(x_train, y_train, target_samples_per_class):
+def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=np.float32):
     """
     Generates synthetic examples to balance classes to the specified number of examples per class.
 
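The unchanged body of `split` selects rows via shuffled indices, as the `train_indices`/`test_indices` context lines show. A self-contained sketch of that index-permutation split, assuming a seeded shuffle as the new `random_state=42` default suggests (`split_sketch` is an illustrative name, not the library function):

```python
import numpy as np

def split_sketch(X, y, test_size=0.2, random_state=42):
    """Index-permutation split: shuffle row indices once, then slice."""
    num_samples = X.shape[0]
    n_test = int(num_samples * test_size) if isinstance(test_size, float) else test_size

    rng = np.random.default_rng(random_state)      # seeded, reproducible
    indices = rng.permutation(num_samples)
    test_idx, train_idx = indices[:n_test], indices[n_test:]

    return X[train_idx], X[test_idx], y[train_idx], y[test_idx]

X, y = np.arange(20).reshape(10, 2), np.arange(10)
x_train, x_test, y_train, y_test = split_sketch(X, y, test_size=0.3)
print(x_train.shape, x_test.shape)   # (7, 2) (3, 2)
```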
@@ -103,21 +115,18 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
     x_train -- Input dataset (examples) - NumPy array format
     y_train -- Class labels (one-hot encoded) - NumPy array format
     target_samples_per_class -- Desired number of samples per class
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
     x_balanced -- Balanced input dataset (NumPy array format)
     y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
     """
     from .ui import loading_bars
-
-    bar_format = loading_bars()[0]
+    from .memory_operations import transfer_to_cpu
 
-    try:
-        x_train = np.array(x_train)
-        y_train = np.array(y_train)
-    except:
-        pass
+    x_train = transfer_to_cpu(x_train, dtype=dtype)
 
+    bar_format = loading_bars()[0]
     classes = np.arange(y_train.shape[1])
     class_count = len(classes)
 
@@ -143,8 +152,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
         if num_samples < target_samples_per_class:
 
             samples_to_add = target_samples_per_class - num_samples
-            additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
-            additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+            additional_samples = np.zeros((samples_to_add, x_train.shape[1]), dtype=x_train.dtype)
+            additional_labels = np.zeros((samples_to_add, y_train.shape[1]), dtype=y_train.dtype)
 
             for i in range(samples_to_add):
 
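The hunk preallocates the synthetic buffers with the inputs' dtype; the fill loop itself sits outside the hunk. A hedged sketch of the overall oversampling pattern follows; the choice to synthesize a row as the midpoint of two real rows is an assumption for illustration, not necessarily `manuel_balancer`'s exact rule:

```python
import numpy as np

def oversample_class(x_cls, target, seed=0):
    """Pad one class's samples up to `target` rows. Synthetic rows are the
    midpoint of two randomly chosen real rows - an illustrative rule only."""
    rng = np.random.default_rng(seed)
    n = x_cls.shape[0]
    if n >= target:
        return x_cls[:target]

    # dtype-preserving preallocation, exactly as the hunk now does
    extra = np.zeros((target - n, x_cls.shape[1]), dtype=x_cls.dtype)
    for i in range(target - n):
        a, b = x_cls[rng.integers(n)], x_cls[rng.integers(n)]
        extra[i] = (a + b) / 2
    return np.vstack((x_cls, extra))

x_cls = np.random.rand(3, 4).astype(np.float32)
out = oversample_class(x_cls, target=8)
print(out.shape, out.dtype)   # (8, 4) float32 - dtype survives the padding
```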
@@ -162,33 +171,38 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
             x_balanced.append(additional_samples)
             y_balanced.append(additional_labels)
 
-    x_balanced = np.vstack(x_balanced)
-    y_balanced = np.vstack(y_balanced)
+    x_balanced = np.vstack(x_balanced, dtype=x_train.dtype)
+    y_balanced = np.vstack(y_balanced, dtype=y_train.dtype)
+
+    del x_train, y_train
 
     return x_balanced, y_balanced
 
 
-def auto_balancer(x_train, y_train):
+def auto_balancer(x_train, y_train, dtype=np.float32):
 
     """
     Function to balance the training data across different classes.
 
     Arguments:
     x_train (list): Input data for training.
-    y_train (list): Labels corresponding to the input data.
+    y_train (list): Labels corresponding to the input data. one-hot encoded.
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
     tuple: A tuple containing balanced input data and labels.
     """
     from .ui import loading_bars
+    from .memory_operations import transfer_to_cpu
+
+    x_train = transfer_to_cpu(x_train, dtype=dtype)
 
     bar_format = loading_bars()[0]
-
     classes = np.arange(y_train.shape[1])
     class_count = len(classes)
 
     try:
-        ClassIndices = {i: np.where(
+        ClassIndices = {i: np.where(y_train[:, i] == 1)[
             0] for i in range(class_count)}
         classes = [len(ClassIndices[i]) for i in range(class_count)]
 
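The corrected `ClassIndices` expression is the heart of `auto_balancer`: it maps each class to the row indices where its one-hot column is set. Here it is in isolation; the undersampling step afterwards is a plausible reconstruction, since that part of the function is outside the hunk:

```python
import numpy as np

y = np.array([[1, 0, 0],
              [0, 1, 0],
              [1, 0, 0],
              [0, 0, 1]])
class_count = y.shape[1]

# The fixed 4.1.9 expression: rows whose one-hot column i equals 1.
ClassIndices = {i: np.where(y[:, i] == 1)[0] for i in range(class_count)}
print(ClassIndices)   # {0: array([0, 2]), 1: array([1]), 2: array([3])}

# A plausible undersampling step: cut every class to the smallest class size.
min_count = min(len(idx) for idx in ClassIndices.values())
rows = np.concatenate([idx[:min_count] for idx in ClassIndices.values()])
print(y[rows])        # one row per class
```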
@@ -221,28 +235,34 @@ def auto_balancer(x_train, y_train):
         print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters")
         sys.exit()
 
-
+    BalancedInputs = BalancedInputs.astype(dtype, copy=False)
+    BalancedLabels = BalancedLabels.astype(dtype=y_train.dtype, copy=False)
 
+    del x_train, y_train
 
-def synthetic_augmentation(x_train, y_train):
+    return BalancedInputs, BalancedLabels
+
+
+def synthetic_augmentation(x, y, dtype=np.float32):
     """
     Generates synthetic examples to balance classes with fewer examples.
 
     Arguments:
     x -- Input dataset (examples) - array format
     y -- Class labels (one-hot encoded) - array format
+    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
     x_balanced -- Balanced input dataset (array format)
     y_balanced -- Balanced class labels (one-hot encoded, array format)
     """
     from .ui import loading_bars
+    from .memory_operations import transfer_to_cpu
+
+    x = transfer_to_cpu(x, dtype=dtype)
 
     bar_format = loading_bars()[0]
-
-    x = x_train
-    y = y_train
-    classes = np.arange(y_train.shape[1])
+    classes = np.arange(y.shape[1])
     class_count = len(classes)
 
     class_distribution = {i: 0 for i in range(class_count)}
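`astype(..., copy=False)` appears throughout 4.1.9. It only copies when a conversion is actually needed, which is what keeps the new dtype plumbing cheap when arrays already match:

```python
import numpy as np

a = np.ones(4, dtype=np.float32)

same = a.astype(np.float32, copy=False)   # dtype already matches: no copy made
cast = a.astype(np.float64, copy=False)   # dtype differs: a copy is unavoidable

print(same is a)               # True  - the very same array object
print(cast is a, cast.dtype)   # False float64
```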
@@ -277,18 +297,26 @@ def synthetic_augmentation(x_train, y_train):
 
             num_samples += 1
 
+    x_balanced = np.array(x_balanced).astype(dtype, copy=False)
+    y_balanced = np.array(y_balanced).astype(dtype=y.dtype, copy=False)
+
+    del x, y
 
-    return
+    return x_balanced, y_balanced
 
 
-def standard_scaler(x_train=None, x_test=None, scaler_params=None):
+def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=np.float32):
     """
     Standardizes training and test datasets. x_test may be None.
 
     Args:
-
-
+        x_train: numpy.ndarray
+
+        x_test: numpy.ndarray (optional)
+
         scaler_params (optional for using model)
+
+        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
         list:
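The `return` → `return x_balanced, y_balanced` change above is a genuine bug fix: the 3.3.4 body ended in a bare `return`, so any caller unpacking the result got a `TypeError`. A toy reproduction of the before/after behavior (the stub functions are hypothetical stand-ins, not the library code):

```python
import numpy as np

def synthetic_augmentation_334(x, y):
    return                        # 3.3.4: bare return, caller receives None

def synthetic_augmentation_419(x, y):
    return x, y                   # 4.1.9: returns the balanced pair

x, y = np.zeros((2, 2)), np.eye(2)
try:
    xb, yb = synthetic_augmentation_334(x, y)
except TypeError as e:
    print(e)                      # cannot unpack non-iterable NoneType object
xb, yb = synthetic_augmentation_419(x, y)   # now unpacks fine
```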
@@ -296,17 +324,9 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
         tuple
         Standardized training and test datasets
     """
-
-
-    try:
-        x_train = x_train.tolist()
-        x_test = x_test.tolist()
-
-    except:
-
-        pass
-
-    if x_train != None and scaler_params == None and x_test != None:
+    if x_train is not None and scaler_params is None and x_test is not None:
+        x_train = x_train.astype(dtype, copy=False)
+        x_test = x_test.astype(dtype, copy=False)
 
         mean = np.mean(x_train, axis=0)
         std = np.std(x_train, axis=0)
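The rewritten branch is a plain z-score fit with a `nan_to_num` guard for zero-variance columns. Sketched as separate fit/apply helpers (hypothetical names; the library keeps both paths inside `standard_scaler`):

```python
import numpy as np

def fit_standard_scaler(x_train, dtype=np.float32):
    """Fit branch: compute (mean, std) once, return them with the scaled data."""
    x_train = x_train.astype(dtype, copy=False)
    mean, std = np.mean(x_train, axis=0), np.std(x_train, axis=0)
    scaled = np.nan_to_num((x_train - mean) / std, nan=0)  # zero-variance cols -> 0
    return (mean, std), scaled

def apply_standard_scaler(x, scaler_params, dtype=np.float32):
    """Inference branch: reuse the stored (mean, std), like the scaler_params path."""
    x = x.astype(dtype, copy=False)
    return np.nan_to_num((x - scaler_params[0]) / scaler_params[1], nan=0)

x_train = np.array([[1., 2.], [3., 2.], [5., 2.]])   # 2nd column: zero variance
params, x_scaled = fit_standard_scaler(x_train)
print(x_scaled[:, 1])                                # all zeros, no NaN
print(apply_standard_scaler(np.array([[3., 2.]]), params))  # [[0. 0.]]
```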
@@ -321,53 +341,32 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
 
         return scaler_params, train_data_scaled, test_data_scaled
 
-
-
-
-        mean = np.mean(x_train, axis=0)
-        std = np.std(x_train, axis=0)
-        train_data_scaled = (x_train - mean) / std
+    if scaler_params is None and x_train is None and x_test is not None:
+        return x_test.astype(dtype, copy=False)  # sample data not scaled
 
-
-
-
-
-        return scaler_params, train_data_scaled
-    except:
-
-        # this model is not scaled
-
-        return x_test
-
-    if scaler_params != None:
+    if scaler_params is not None:
+        x_test = x_test.astype(dtype, copy=False)
+        scaled_data = (x_test - scaler_params[0]) / scaler_params[1]
+        scaled_data = np.nan_to_num(scaled_data, nan=0)
 
-
-
-        test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
-        test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
-
-    except:
-
-        test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
-        test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
-
-        return test_data_scaled
+    return scaled_data  # sample data scaled
 
 
 def normalization(
-    Input  # num: Input data to be normalized.
-):
+    Input,  # num: Input data to be normalized.
+    dtype=np.float32):
     """
     Normalizes the input data using maximum absolute scaling.
 
     Args:
         Input (num): Input data to be normalized.
+        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
         (num) Scaled input data after normalization.
     """
 
-    MaxAbs = np.max(np.abs(Input))
+    MaxAbs = np.max(np.abs(Input.astype(dtype, copy=False)))
     return (Input / MaxAbs)
 
 
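`normalization` is max-abs scaling: divide everything by the largest absolute value, so the output lands in [-1, 1]. A worked example of the same formula:

```python
import numpy as np

def normalization_sketch(Input, dtype=np.float32):
    """Max-abs scaling, as in the hunk."""
    MaxAbs = np.max(np.abs(Input.astype(dtype, copy=False)))
    return Input / MaxAbs

print(normalization_sketch(np.array([-4.0, 2.0, 8.0])))   # [-0.5   0.25  1.  ]
```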
@@ -383,6 +382,9 @@ def find_closest_factors(a):
 
 def batcher(x_test, y_test, batch_size=1):
 
+    if batch_size == 1:
+        return x_test, y_test
+
     y_labels = np.argmax(y_test, axis=1)
 
     sampled_x, sampled_y = [], []