pyerualjetwork 4.3.8.dev15__py3-none-any.whl → 4.3.9__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +2 -2
- pyerualjetwork/activation_functions_cuda.py +63 -114
- pyerualjetwork/data_operations_cuda.py +1 -1
- pyerualjetwork/fitness_functions.py +72 -0
- pyerualjetwork/fitness_functions_cuda.py +85 -0
- pyerualjetwork/model_operations.py +14 -14
- pyerualjetwork/model_operations_cuda.py +16 -17
- pyerualjetwork/plan.py +159 -382
- pyerualjetwork/plan_cuda.py +149 -387
- pyerualjetwork/planeat.py +24 -54
- pyerualjetwork/planeat_cuda.py +11 -47
- pyerualjetwork/visualizations.py +33 -30
- pyerualjetwork/visualizations_cuda.py +22 -24
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9.dist-info}/METADATA +3 -19
- pyerualjetwork-4.3.9.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.9.dist-info/top_level.txt +1 -0
- pyerualjetwork/loss_functions.py +0 -21
- pyerualjetwork/loss_functions_cuda.py +0 -21
- pyerualjetwork-4.3.8.dev15.dist-info/RECORD +0 -45
- pyerualjetwork-4.3.8.dev15.dist-info/top_level.txt +0 -2
- pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork_afterburner/activation_functions.py +0 -290
- pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
- pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
- pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork_afterburner/model_operations.py +0 -408
- pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
- pyerualjetwork_afterburner/parallel.py +0 -118
- pyerualjetwork_afterburner/plan.py +0 -432
- pyerualjetwork_afterburner/plan_cuda.py +0 -441
- pyerualjetwork_afterburner/planeat.py +0 -793
- pyerualjetwork_afterburner/planeat_cuda.py +0 -752
- pyerualjetwork_afterburner/ui.py +0 -22
- pyerualjetwork_afterburner/visualizations.py +0 -823
- pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9.dist-info}/WHEEL +0 -0
--- a/pyerualjetwork_afterburner/data_operations_cuda.py
+++ /dev/null
@@ -1,461 +0,0 @@
-from tqdm import tqdm
-import cupy as cp
-from colorama import Fore, Style
-import math
-import numpy as np
-
-def encode_one_hot(y_train, y_test=None, summary=False):
-    """
-    Performs one-hot encoding on y_train and y_test data.
-
-    Args:
-        y_train (cupy.ndarray): Train label data.
-        y_test (cupy.ndarray): Test label data (optional).
-        summary (bool): If True, prints the class-to-index mapping. Default: False
-
-    Returns:
-        tuple: One-hot encoded y_train and (if given) y_test.
-    """
-
-    from .memory_operations import optimize_labels, transfer_to_cpu
-
-    y_train = optimize_labels(y_train, one_hot_encoded=False, cuda=True)
-    y_test = optimize_labels(y_test, one_hot_encoded=False, cuda=True)
-
-    y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
-    y_test = transfer_to_cpu(y_test, dtype=y_test.dtype)
-
-    classes = np.unique(y_train)
-    class_count = len(classes)
-
-    class_to_index = {cls: idx for idx, cls in enumerate(classes)}
-
-    if summary:
-        print("Class-to-index mapping:")
-        for cls, idx in class_to_index.items():
-            print(f"  {idx}: {cls}")
-
-    y_train_encoded = np.zeros((y_train.shape[0], class_count), dtype=y_train.dtype)
-    for i, label in enumerate(y_train):
-        y_train_encoded[i, class_to_index[label]] = 1
-
-    if y_test is not None:
-        y_test_encoded = np.zeros((y_test.shape[0], class_count), dtype=y_test.dtype)
-        for i, label in enumerate(y_test):
-            y_test_encoded[i, class_to_index[label]] = 1
-        return cp.array(y_train_encoded, dtype=y_train.dtype), cp.array(y_test_encoded, dtype=y_test.dtype)
-
-    return cp.array(y_train_encoded, dtype=y_train.dtype)
-
-
-def decode_one_hot(encoded_data):
-    """
-    Decodes one-hot encoded data to original categorical labels.
-
-    Args:
-        encoded_data (cupy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
-
-    Returns:
-        cupy.ndarray: Decoded categorical labels with shape (n_samples,).
-    """
-
-    if encoded_data.ndim == 1: return cp.argmax(encoded_data)
-    else: return cp.argmax(encoded_data, axis=1)
-
-
-
-def split(X, y, test_size, random_state=42, dtype=cp.float32, shuffle_in_cpu=False):
-    """
-    Splits the given X (features) and y (labels) data into training and testing subsets.
-
-    Args:
-        X (cupy.ndarray): Features data.
-
-        y (cupy.ndarray): Labels data.
-
-        test_size (float or int): Proportion or number of samples for the test subset.
-
-        random_state (int or None): Seed for random state. Default: 42.
-
-        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
-
-        shuffle_in_cpu (bool): If True, the output matches the CPU split function. Default: False. (Use this for direct comparison with CPU training.)
-    Returns:
-        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
-    """
-    from .memory_operations import transfer_to_gpu, optimize_labels
-
-    X = transfer_to_gpu(X, dtype=dtype)
-    y = optimize_labels(y, one_hot_encoded=False, cuda=True)
-
-    num_samples = X.shape[0]
-
-    if isinstance(test_size, float):
-        test_size = int(test_size * num_samples)
-    elif isinstance(test_size, int):
-        if test_size > num_samples:
-            raise ValueError(
-                "test_size cannot be larger than the number of samples.")
-    else:
-        raise ValueError("test_size should be float or int.")
-
-    if shuffle_in_cpu:
-        indices = np.arange(num_samples)
-        np.random.seed(random_state)
-        np.random.shuffle(indices)
-        indices = cp.array(indices)
-    else:
-        if random_state is not None:
-            cp.random.seed(random_state)
-
-        indices = cp.arange(num_samples)
-        cp.random.shuffle(indices)
-
-    test_indices = indices[:test_size]
-    train_indices = indices[test_size:]
-
-    x_train, x_test = X[train_indices], X[test_indices]
-    y_train, y_test = y[train_indices], y[test_indices]
-    del X
-    del y
-    cp.cuda.MemoryPool().free_all_blocks()
-    return x_train, x_test, y_train, y_test
-
-
-def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32, shuffle_in_cpu=False):
-    """
-    Generates synthetic examples to balance classes to the specified number of examples per class.
-
-    Arguments:
-
-        x_train -- Input dataset (examples) - cupy array format
-
-        y_train -- Class labels (one-hot encoded) - cupy array format
-
-        target_samples_per_class -- Desired number of samples per class
-
-        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
-
-        shuffle_in_cpu (bool): If True, the output matches the CPU manuel_balancer function. Default: False. (Use this for direct comparison with CPU training.)
-
-    Returns:
-        x_balanced -- Balanced input dataset (cupy array format)
-        y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
-    """
-    from .ui import loading_bars
-    from .memory_operations import transfer_to_gpu
-
-    bar_format = loading_bars()[0]
-    x_train = transfer_to_gpu(x_train, dtype=dtype)
-    y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
-
-    classes = cp.arange(y_train.shape[1])
-    class_count = len(classes)
-
-    x_balanced = []
-    y_balanced = []
-
-    for class_label in tqdm(range(class_count), leave=False, ascii="▱▰",
-                            bar_format=bar_format, desc='Augmenting Data', ncols=52):
-        class_indices = cp.where(cp.argmax(y_train, axis=1) == class_label)[0]
-        num_samples = len(class_indices)
-
-        if num_samples > target_samples_per_class:
-
-            if shuffle_in_cpu:
-                selected_indices = np.random.choice(
-                    class_indices.get(), target_samples_per_class, replace=False)
-            else:
-                selected_indices = cp.random.choice(class_indices, target_samples_per_class, replace=False)
-
-            x_balanced.append(x_train[selected_indices])
-            y_balanced.append(y_train[selected_indices])
-
-        else:
-
-            x_balanced.append(x_train[class_indices])
-            y_balanced.append(y_train[class_indices])
-
-            if num_samples < target_samples_per_class:
-
-                samples_to_add = target_samples_per_class - num_samples
-                additional_samples = cp.zeros((samples_to_add, x_train.shape[1]), dtype=x_train.dtype)
-                additional_labels = cp.zeros((samples_to_add, y_train.shape[1]), dtype=y_train.dtype)
-
-                for i in range(samples_to_add):
-
-                    if shuffle_in_cpu:
-                        random_indices = np.random.choice(class_indices.get(), 2, replace=False)
-                    else:
-                        random_indices = cp.random.choice(class_indices, 2, replace=False)
-
-                    sample1 = x_train[random_indices[0]]
-                    sample2 = x_train[random_indices[1]]
-
-                    if shuffle_in_cpu:
-                        synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
-                    else:
-                        synthetic_sample = sample1 + (sample2 - sample1) * cp.random.rand()
-
-                    additional_samples[i] = synthetic_sample
-                    additional_labels[i] = y_train[class_indices[0]]
-
-
-                x_balanced.append(additional_samples)
-                y_balanced.append(additional_labels)
-
-    x_balanced = cp.vstack(x_balanced, dtype=x_train.dtype)
-    y_balanced = cp.vstack(y_balanced, dtype=y_train.dtype)
-
-    del x_train, y_train
-    cp.cuda.MemoryPool().free_all_blocks()
-
-    return x_balanced, y_balanced
-
-
-def auto_balancer(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
-
-    """
-    Function to balance the training data across different classes.
-
-    Arguments:
-        x_train (list): Input data for training.
-
-        y_train (list): Labels corresponding to the input data (one-hot encoded).
-
-        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
-
-        shuffle_in_cpu (bool): If True, the output matches the CPU auto_balancer function. Default: False. (Use this for direct comparison with CPU training.)
-    Returns:
-        tuple: A tuple containing balanced input data and labels.
-    """
-    from .ui import loading_bars
-    from .memory_operations import transfer_to_gpu
-
-    x_train = transfer_to_gpu(x_train, dtype=dtype)
-    y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
-
-    bar_format = loading_bars()[0]
-
-    classes = cp.arange(y_train.shape[1], dtype=y_train.dtype)
-    class_count = len(classes)
-
-
-    ClassIndices = {i: cp.where(y_train[:, i] == 1)[
-        0] for i in range(class_count)}
-    classes = [len(ClassIndices[i]) for i in range(class_count)]
-
-    if len(set(classes)) == 1:
-        print(Fore.WHITE + "INFO: Data have already balanced. from: auto_balancer" + Style.RESET_ALL)
-        return x_train, y_train
-
-    MinCount = min(classes)
-
-    BalancedIndices = []
-    for i in tqdm(range(class_count), leave=False, ascii="▱▰",
-                  bar_format=bar_format, desc='Balancing Data', ncols=70):
-        if len(ClassIndices[i]) > MinCount:
-            if shuffle_in_cpu:
-                SelectedIndices = np.random.choice(
-                    ClassIndices[i].get(), MinCount, replace=False)
-            else:
-                SelectedIndices = cp.random.choice(
-                    ClassIndices[i], MinCount, replace=False)
-        else:
-            SelectedIndices = ClassIndices[i]
-        BalancedIndices.extend(SelectedIndices)
-
-    BalancedInputs = [x_train[idx] for idx in BalancedIndices]
-    BalancedLabels = [y_train[idx] for idx in BalancedIndices]
-
-    if shuffle_in_cpu:
-        permutation = np.random.permutation(len(BalancedInputs))
-    else:
-        permutation = cp.random.permutation(len(BalancedInputs))
-
-    BalancedInputs = cp.array(BalancedInputs)[permutation]
-    BalancedLabels = cp.array(BalancedLabels)[permutation]
-
-    print(Fore.GREEN + "Data Succesfully Balanced from: " + str(len(x_train)
-          ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
-    del x_train, y_train
-    cp.cuda.MemoryPool().free_all_blocks()
-
-    return BalancedInputs, BalancedLabels
-
-
-def synthetic_augmentation(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
-    """
-    Generates synthetic examples to balance classes with fewer examples using CuPy.
-    Arguments:
-
-        x_train -- Input dataset (examples) - cupy array format
-
-        y_train -- Class labels (one-hot encoded) - cupy array format
-
-        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
-
-        shuffle_in_cpu (bool): If True, the output matches the CPU synthetic_augmentation function. Default: False. (Use this for direct comparison with CPU training.)
-
-    Returns:
-        x_train_balanced -- Balanced input dataset (cupy array format)
-        y_train_balanced -- Balanced class labels (one-hot encoded, cupy array format)
-    """
-    from .ui import loading_bars
-    from .memory_operations import transfer_to_gpu
-
-    x = transfer_to_gpu(x_train, dtype=dtype)
-    y = transfer_to_gpu(y_train, dtype=y_train.dtype)
-
-    bar_format = loading_bars()[0]
-
-    classes = cp.arange(y_train.shape[1])
-    class_count = len(classes)
-    class_distribution = {i: 0 for i in range(class_count)}
-
-    for label in y:
-        class_distribution[cp.argmax(label).item()] += 1
-
-    max_class_count = max(class_distribution.values())
-    x_balanced = list(x)
-    y_balanced = list(y)
-
-    for class_label in tqdm(range(class_count), leave=False, ascii="▱▰",
-                            bar_format=bar_format, desc='Augmenting Data', ncols=52):
-        class_indices = [i for i, label in enumerate(y) if cp.argmax(label) == class_label]
-        num_samples = len(class_indices)
-
-        if num_samples < max_class_count:
-            while num_samples < max_class_count:
-                if shuffle_in_cpu:
-                    random_indices = np.random.choice(
-                        class_indices, 2, replace=False)
-                else:
-                    random_indices = cp.random.choice(
-                        cp.array(class_indices), 2, replace=False)
-                sample1 = x[random_indices[0]]
-                sample2 = x[random_indices[1]]
-
-                if shuffle_in_cpu:
-                    synthetic_sample = sample1 + \
-                        (sample2 - sample1) * np.random.rand()
-                else:
-                    synthetic_sample = sample1 + \
-                        (sample2 - sample1) * cp.random.rand()
-
-                x_balanced.append(synthetic_sample)
-                y_balanced.append(y[class_indices[0]])
-                num_samples += 1
-
-    x_balanced = cp.array(x_balanced)
-    y_balanced = cp.array(y_balanced)
-
-    del x_train, y_train, x, y
-    cp.cuda.MemoryPool().free_all_blocks()
-
-    return x_balanced, y_balanced
-
-def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=cp.float32):
-    """
-    Standardizes training and test datasets. x_test may be None.
-
-    Args:
-        x_train: cupy.ndarray
-
-        x_test: cupy.ndarray (optional)
-
-        scaler_params (optional for using model)
-
-        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
-
-    Returns:
-        list:
-            Scaler parameters: mean and std
-        tuple
-            Standardized training and test datasets
-    """
-    if x_train is not None and scaler_params is None and x_test is not None:
-        x_train = x_train.astype(dtype, copy=False)
-        x_test = x_test.astype(dtype, copy=False)
-
-        mean = cp.mean(x_train, axis=0)
-        std = cp.std(x_train, axis=0)
-
-        train_data_scaled = (x_train - mean) / std
-        test_data_scaled = (x_test - mean) / std
-
-        train_data_scaled = cp.nan_to_num(train_data_scaled, nan=0)
-        test_data_scaled = cp.nan_to_num(test_data_scaled, nan=0)
-
-        scaler_params = [mean, std]
-
-        return scaler_params, train_data_scaled, test_data_scaled
-
-    if scaler_params is None and x_train is None and x_test is not None:
-        return x_test.astype(dtype, copy=False)  # sample data not scaled
-
-    if scaler_params is not None:
-        x_test = x_test.astype(dtype, copy=False)
-        scaled_data = (x_test - scaler_params[0]) / scaler_params[1]
-        scaled_data = cp.nan_to_num(scaled_data, nan=0)
-
-        return scaled_data  # sample data scaled
-
-
-def normalization(
-    Input,  # num: Input data to be normalized.
-    dtype=cp.float32
-):
-    """
-    Normalizes the input data using maximum absolute scaling.
-
-    Args:
-        Input (num): Input data to be normalized.
-
-        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)
-
-    Returns:
-        (num) Scaled input data after normalization.
-    """
-
-    MaxAbs = cp.max(cp.abs(Input.astype(dtype, copy=False)))
-    return (Input / MaxAbs)
-
-def find_closest_factors(a):
-
-    root = int(math.sqrt(a))
-
-    for i in range(root, 0, -1):
-        if a % i == 0:
-            j = a // i
-            return i, j
-
-def batcher(x_test, y_test, batch_size=1):
-
-    if batch_size == 1:
-        return x_test, y_test
-
-    y_labels = cp.argmax(y_test, axis=1)
-
-    unique_labels = cp.unique(y_labels)
-    total_samples = sum(
-        int(cp.sum(y_labels == class_label) * batch_size) for class_label in unique_labels
-    )
-
-    sampled_x = cp.empty((total_samples, x_test.shape[1]), dtype=x_test.dtype)
-    sampled_y = cp.empty((total_samples, y_test.shape[1]), dtype=y_test.dtype)
-
-    offset = 0
-    for class_label in unique_labels:
-        class_indices = cp.where(y_labels == class_label)[0]
-
-        num_samples = int(len(class_indices) * batch_size)
-
-        sampled_indices = cp.random.choice(class_indices, num_samples, replace=False)
-
-        sampled_x[offset:offset + num_samples] = x_test[sampled_indices]
-        sampled_y[offset:offset + num_samples] = y_test[sampled_indices]
-
-        offset += num_samples
-
-    return sampled_x, sampled_y
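The removed module mirrors the data helpers that survive in the main package. For orientation, below is a minimal usage sketch, not the package's documented API: it assumes a CUDA device with CuPy available and that `pyerualjetwork/data_operations_cuda.py` (retained above with a one-line change) keeps these same function names and signatures.

```python
import cupy as cp

# Assumption: the surviving main-package module exposes the same helpers
# that the removed afterburner copy did.
from pyerualjetwork.data_operations_cuda import (
    encode_one_hot, split, standard_scaler)

X = cp.random.rand(100, 4).astype(cp.float32)  # toy feature matrix
y = cp.random.randint(0, 3, 100)               # integer labels, 3 classes

# Split on integer labels, then one-hot encode train/test together so both
# share a single class-to-index mapping.
x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
y_train, y_test = encode_one_hot(y_train, y_test)

# Standardize features using statistics computed on the training split.
scaler_params, x_train, x_test = standard_scaler(x_train, x_test)
```

The balancing helpers (`auto_balancer`, `manuel_balancer`, `synthetic_augmentation`) follow the same conventions: they take one-hot labels and return class-balanced CuPy arrays, synthesizing extra samples for under-represented classes by linear interpolation between two random members of that class.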
--- a/pyerualjetwork_afterburner/help.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from activation_functions import all_activations
-
-
-def activation_potentiation():
-
-    activations_list = all_activations()
-
-    print('All available activations: ', activations_list, "\n\nYOU CAN COMBINE EVERY ACTIVATION. EXAMPLE: ['linear', 'tanh'] or ['waveakt', 'linear', 'sine'].")
-
-    return activations_list
-
-def docs_and_examples():
-
-    print('PLAN document: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_PLAN\n')
-    print('PLAN examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan/ExampleCodes\n')
-    print('PLANEAT examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan/ExampleCodes/PLANEAT\n')
-    print('Anaplan document and examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan')
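The removed help module only wrapped two print helpers. A rough equivalent on 4.3.9, assuming `all_activations` is still importable from the retained `pyerualjetwork/activation_functions.py` (an assumption based on the import above, not verified against 4.3.9):

```python
# Assumption: all_activations() still returns the list of activation names.
from pyerualjetwork.activation_functions import all_activations

activations = all_activations()
# Activations can be combined freely, e.g. ['linear', 'tanh'] or
# ['waveakt', 'linear', 'sine'].
print(activations)
```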
--- a/pyerualjetwork_afterburner/loss_functions.py
+++ /dev/null
@@ -1,21 +0,0 @@
-
-import numpy as np
-
-def categorical_crossentropy(y_true_batch, y_pred_batch):
-    epsilon = 1e-7
-    y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
-
-    losses = -np.sum(y_true_batch * np.log(y_pred_batch), axis=1)
-
-    mean_loss = np.mean(losses)
-    return mean_loss
-
-
-def binary_crossentropy(y_true_batch, y_pred_batch):
-    epsilon = 1e-7
-    y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
-
-    losses = -np.mean(y_true_batch * np.log(y_pred_batch) + (1 - y_true_batch) * np.log(1 - y_pred_batch), axis=1)
-
-    mean_loss = np.mean(losses)
-    return mean_loss
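Both removed losses clip predictions to [1e-7, 1 - 1e-7] before taking logarithms, then average the per-sample losses over the batch. A small self-contained check of the categorical case, with values chosen purely for illustration:

```python
import numpy as np

y_true = np.array([[1.0, 0.0], [0.0, 1.0]])  # one-hot targets
y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])  # predicted probabilities

eps = 1e-7
p = np.clip(y_pred, eps, 1.0 - eps)
# Per-sample loss is -sum_j y_ij * log(p_ij); the batch loss is the mean.
loss = np.mean(-np.sum(y_true * np.log(p), axis=1))
print(round(float(loss), 4))  # 0.1643, i.e. (-ln 0.9 - ln 0.8) / 2
```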
--- a/pyerualjetwork_afterburner/loss_functions_cuda.py
+++ /dev/null
@@ -1,21 +0,0 @@
-
-import cupy as cp
-
-def categorical_crossentropy(y_true_batch, y_pred_batch):
-    epsilon = 1e-7
-    y_pred_batch = cp.clip(y_pred_batch, epsilon, 1. - epsilon)
-
-    losses = -cp.sum(y_true_batch * cp.log(y_pred_batch), axis=1)
-
-    mean_loss = cp.mean(losses)
-    return mean_loss
-
-
-def binary_crossentropy(y_true_batch, y_pred_batch):
-    epsilon = 1e-7
-    y_pred_batch = cp.clip(y_pred_batch, epsilon, 1. - epsilon)
-
-    losses = -cp.mean(y_true_batch * cp.log(y_pred_batch) + (1 - y_true_batch) * cp.log(1 - y_pred_batch), axis=1)
-
-    mean_loss = cp.mean(losses)
-    return mean_loss
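The CUDA file is a line-for-line CuPy port of the NumPy version above, so the two should agree to floating-point precision. A quick parity check, sketched under the assumption of a working CUDA device:

```python
import numpy as np
import cupy as cp

y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])
eps = 1e-7

# Same categorical cross-entropy, computed once on CPU and once on GPU.
cpu = np.mean(-np.sum(y_true * np.log(np.clip(y_pred, eps, 1 - eps)), axis=1))
gpu = cp.mean(-cp.sum(cp.asarray(y_true) * cp.log(cp.clip(cp.asarray(y_pred), eps, 1 - eps)), axis=1))

assert np.isclose(cpu, float(gpu))  # CPU and GPU losses match
```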