pyerualjetwork 4.1.0__py3-none-any.whl → 4.1.2b0__py3-none-any.whl
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions_cuda.py +2 -2
- pyerualjetwork/data_operations_cuda.py +93 -46
- pyerualjetwork/model_operations_cuda.py +4 -2
- pyerualjetwork/plan.py +1 -1
- pyerualjetwork/plan_cuda.py +5 -18
- pyerualjetwork/planeat.py +4 -4
- pyerualjetwork/planeat_cuda.py +4 -4
- {pyerualjetwork-4.1.0.dist-info → pyerualjetwork-4.1.2b0.dist-info}/METADATA +1 -1
- pyerualjetwork-4.1.2b0.dist-info/RECORD +23 -0
- pyerualjetwork-4.1.0.dist-info/RECORD +0 -23
- {pyerualjetwork-4.1.0.dist-info → pyerualjetwork-4.1.2b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.0.dist-info → pyerualjetwork-4.1.2b0.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -47,7 +47,7 @@ for package_name in package_names:

print(f"PyerualJetwork is ready to use with {err} errors")

-__version__ = "4.1.0"
+__version__ = "4.1.2b0"
__update__ = "* Note: CUDA modules need cupy. Enter this command in your terminal: 'pip install cupy-cuda12x' or your cuda version.\n* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

def print_version(__version__):
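For reference, a quick way to confirm which build is installed after upgrading (a minimal sketch; it relies only on the __version__ and __update__ attributes shown above):

import pyerualjetwork

print(pyerualjetwork.__version__)   # expected: "4.1.2b0" after this upgrade
print(pyerualjetwork.__update__)    # prints the update notes defined alongside it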
pyerualjetwork/activation_functions_cuda.py
CHANGED
@@ -18,9 +18,9 @@ def spiral_activation(x):

    spiral_x = r * cp.cos(theta + r)
    spiral_y = r * cp.sin(theta + r)
-

-
+
+    spiral_output = cp.concatenate([cp.array([spiral_x[0]]), spiral_y])

    return spiral_output

pyerualjetwork/data_operations_cuda.py
CHANGED
@@ -81,17 +81,22 @@ def decode_one_hot(encoded_data):
    return decoded_labels


-def split(X, y, test_size, random_state, dtype=cp.float32):
+def split(X, y, test_size, random_state=42, dtype=cp.float32, use_cpu=False):
    """
    Splits the given X (features) and y (labels) data into training and testing subsets.

    Args:
        X (cupy.ndarray): Features data.
+
        y (cupy.ndarray): Labels data.
+
        test_size (float or int): Proportion or number of samples for the test subset.
-
+
+        random_state (int or None): Seed for random state. Default: 42.
+
        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
+
+        use_cpu (bool): If True, output will be same cpu's split function. Default: False.
    Returns:
        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
    """
@@ -118,11 +123,17 @@ def split(X, y, test_size, random_state, dtype=cp.float32):
    else:
        raise ValueError("test_size should be float or int.")

-    if
-
+    if use_cpu:
+        indices = np.arange(num_samples)
+        np.random.seed(random_state)
+        np.random.shuffle(indices)
+        indices = cp.array(indices)
+    else:
+        if random_state is not None:
+            cp.random.seed(random_state)

-
-
+        indices = cp.arange(num_samples)
+        cp.random.shuffle(indices)

    test_indices = indices[:test_size]
    train_indices = indices[test_size:]
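As a usage illustration (a minimal sketch based only on the signature shown above; the toy data shapes are assumptions), the new use_cpu flag makes the GPU split reproduce the CPU module's shuffling for a given random_state:

import cupy as cp
from pyerualjetwork import data_operations_cuda

# toy data: 100 samples, 4 features, one-hot labels for 2 classes
X = cp.random.rand(100, 4).astype(cp.float32)
y = cp.eye(2, dtype=cp.uint8)[cp.random.randint(0, 2, 100)]

# default: CuPy-side shuffling, seeded with random_state=42
x_train, x_test, y_train, y_test = data_operations_cuda.split(X, y, test_size=0.2)

# use_cpu=True: indices are shuffled with NumPy so the split matches the CPU data_operations module
x_train, x_test, y_train, y_test = data_operations_cuda.split(
    X, y, test_size=0.2, random_state=42, use_cpu=True)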
@@ -133,16 +144,22 @@ def split(X, y, test_size, random_state, dtype=cp.float32):
    return x_train, x_test, y_train, y_test


-def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32):
+def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32, use_cpu=False):
    """
    Generates synthetic examples to balance classes to the specified number of examples per class.

    Arguments:
+
        x_train -- Input dataset (examples) - cupy array format
+
        y_train -- Class labels (one-hot encoded) - cupy array format
+
        target_samples_per_class -- Desired number of samples per class
+
        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

+        use_cpu (bool): If True, output will be same cpu's manuel_balancer function. Default: False.
+
    Returns:
        x_balanced -- Balanced input dataset (cupy array format)
        y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
@@ -176,8 +193,13 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
        num_samples = len(class_indices)

        if num_samples > target_samples_per_class:
-
-
+
+            if use_cpu:
+                selected_indices = np.random.choice(
+                    class_indices, target_samples_per_class, replace=False)
+            else:
+                selected_indices = cp.random.choice(class_indices, target_samples_per_class, replace=False)
+
            x_balanced.append(x_train[selected_indices])
            y_balanced.append(y_train[selected_indices])

@@ -193,13 +215,19 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
        additional_labels = cp.zeros((samples_to_add, y_train.shape[1]))

        for i in range(samples_to_add):
+
+            if use_cpu:
+                random_indices = np.random.choice(class_indices.get(), 2, replace=False)
+            else:
+                random_indices = cp.random.choice(class_indices, 2, replace=False)

-            random_indices = cp.random.choice(class_indices, 2, replace=False)
            sample1 = x_train[random_indices[0]]
            sample2 = x_train[random_indices[1]]

-
-
+            if use_cpu:
+                synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+            else:
+                synthetic_sample = sample1 + (sample2 - sample1) * cp.random.rand()

            additional_samples[i] = synthetic_sample
            additional_labels[i] = y_train[class_indices[0]]
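A minimal calling sketch for the updated balancer (toy arrays are assumptions; inputs stay in cupy format per the docstring above, and use_cpu=True draws the random picks with NumPy instead of CuPy):

import cupy as cp
from pyerualjetwork import data_operations_cuda

X = cp.random.rand(60, 8).astype(cp.float32)                  # toy features
y = cp.eye(3, dtype=cp.uint8)[cp.random.randint(0, 3, 60)]    # one-hot labels, 3 classes

# over/undersample every class to exactly 30 examples
x_bal, y_bal = data_operations_cuda.manuel_balancer(X, y, target_samples_per_class=30, use_cpu=True)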
@@ -214,7 +242,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
    return x_balanced, y_balanced


-def auto_balancer(x_train, y_train, dtype=cp.float32):
+def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):

    """
    Function to balance the training data across different classes.
@@ -226,6 +254,8 @@ def auto_balancer(x_train, y_train, dtype=cp.float32):

        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

+        use_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False.
+
    Returns:
        tuple: A tuple containing balanced input data and labels.
    """
@@ -248,44 +278,50 @@ def auto_balancer(x_train, y_train, dtype=cp.float32):
    classes = cp.arange(y_train.shape[1])
    class_count = len(classes)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    ClassIndices = {i: cp.where(cp.array(y_train)[:, i] == 1)[
+        0] for i in range(class_count)}
+    classes = [len(ClassIndices[i]) for i in range(class_count)]
+
+    if len(set(classes)) == 1:
+        print(Fore.WHITE + "INFO: Data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+        return x_train, y_train
+
+    MinCount = min(classes)
+
+    BalancedIndices = []
+    for i in tqdm(range(class_count),leave=False, ascii="▱▰",
+            bar_format= bar_format, desc='Balancing Data',ncols=70):
+        if len(ClassIndices[i]) > MinCount:
+            if use_cpu:
+                SelectedIndices = np.random.choice(
+                    ClassIndices[i].get(), MinCount, replace=False)
+            else:
                SelectedIndices = cp.random.choice(
                    ClassIndices[i], MinCount, replace=False)
-
-
-
-
-
-
+        else:
+            SelectedIndices = ClassIndices[i]
+        BalancedIndices.extend(SelectedIndices)
+
+    BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+    BalancedLabels = [y_train[idx] for idx in BalancedIndices]

+    if use_cpu:
+        permutation = np.random.permutation(len(BalancedInputs))
+    else:
        permutation = cp.random.permutation(len(BalancedInputs))
-        BalancedInputs = cp.array(BalancedInputs)[permutation]
-        BalancedLabels = cp.array(BalancedLabels)[permutation]

-
-
-    except:
-        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters")
-        sys.exit()
+    BalancedInputs = cp.array(BalancedInputs)[permutation]
+    BalancedLabels = cp.array(BalancedLabels)[permutation]

+    print(Fore.GREEN + "Data Succesfully Balanced from: " + str(len(x_train)
+        ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+
+
    return BalancedInputs, BalancedLabels


-def synthetic_augmentation(x_train, y_train, dtype=cp.float32):
+def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
    """
    Generates synthetic examples to balance classes with fewer examples using CuPy.
    Arguments:
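A minimal sketch of calling the reworked auto_balancer above (toy arrays are assumptions; with use_cpu=True the undersampling indices and the final permutation come from NumPy, so results line up with the CPU module):

import cupy as cp
from pyerualjetwork import data_operations_cuda

X = cp.random.rand(90, 5).astype(cp.float32)
y = cp.eye(3, dtype=cp.uint8)[cp.random.randint(0, 3, 90)]   # likely imbalanced one-hot labels

# undersamples every class down to the size of the smallest one
x_bal, y_bal = data_operations_cuda.auto_balancer(X, y, use_cpu=True)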
@@ -296,6 +332,8 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32):

        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

+        use_cpu (bool): If True, output will be same cpu's synthetic_augmentation function. Default: False.
+
    Returns:
        x_train_balanced -- Balanced input dataset (cupy array format)
        y_train_balanced -- Balanced class labels (one-hot encoded, cupy array format)
@@ -336,13 +374,21 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32):

        if num_samples < max_class_count:
            while num_samples < max_class_count:
-
+                if use_cpu:
+                    random_indices = np.random.choice(
+                        class_indices, 2, replace=False)
+                else:
+                    random_indices = cp.random.choice(
                        cp.array(class_indices), 2, replace=False)
                sample1 = x[random_indices[0]]
                sample2 = x[random_indices[1]]

-
-
+                if use_cpu:
+                    synthetic_sample = sample1 + \
+                        (sample2 - sample1) * np.random.rand()
+                else:
+                    synthetic_sample = sample1 + \
+                        (sample2 - sample1) * cp.random.rand()

                x_balanced.append(synthetic_sample)
                y_balanced.append(y[class_indices[0]])
@@ -353,6 +399,7 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32):

    return x_balanced, y_balanced

+
def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=cp.float32):
    """
    Standardizes training and test datasets. x_test may be None.
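For completeness, a hedged sketch of calling synthetic_augmentation with the new flag (toy data assumed): it oversamples minority classes up to the largest class by interpolating random sample pairs, drawing the randomness from NumPy when use_cpu=True.

import cupy as cp
from pyerualjetwork import data_operations_cuda

X = cp.random.rand(50, 6).astype(cp.float32)
y = cp.eye(2, dtype=cp.uint8)[(cp.arange(50) < 40).astype(cp.int32)]   # 40 vs 10 samples: imbalanced

x_aug, y_aug = data_operations_cuda.synthetic_augmentation(X, y, use_cpu=True)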
pyerualjetwork/model_operations_cuda.py
CHANGED
@@ -56,7 +56,7 @@ def save_model(model_name,

    class_count = W.shape[0]

-    if test_acc
+    if test_acc is not None:
        test_acc= float(test_acc)

    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat' and weights_type != 'pkl':
@@ -70,7 +70,6 @@ def save_model(model_name,
    NeuronCount = 0
    SynapseCount = 0

-
    try:
        NeuronCount += cp.shape(W)[0] + cp.shape(W)[1]
        SynapseCount += cp.shape(W)[0] * cp.shape(W)[1]
@@ -91,6 +90,9 @@ def save_model(model_name,

        scaler_params.append(' ')

+    scaler_params[0] = scaler_params[0].get()
+    scaler_params[1] = scaler_params[1].get()
+
    data = {'MODEL NAME': model_name,
            'MODEL TYPE': model_type,
            'CLASS COUNT': class_count,
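The two added .get() calls move the scaler parameters from GPU memory to host NumPy arrays before the model data is written out. A minimal sketch of the same conversion (illustrative only, not the package's API):

import cupy as cp

scaler_params = [cp.zeros(4), cp.ones(4)]          # e.g. per-feature mean and std held on the GPU
scaler_params = [p.get() for p in scaler_params]   # cupy.ndarray.get() returns a numpy.ndarray copy
print(type(scaler_params[0]))                      # <class 'numpy.ndarray'>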
pyerualjetwork/plan.py
CHANGED
@@ -145,7 +145,7 @@ def fit(

    # Training process
    for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False
+        inp = np.array(inp, copy=False).ravel()
        y_decoded = decode_one_hot(y_train)
        # Weight updates
        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
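The change flattens each training sample before the forward pass; a one-liner illustrating the effect (plain NumPy, not the package's code):

import numpy as np

inp = np.arange(6).reshape(2, 3)
print(np.array(inp, copy=False).ravel().shape)   # (6,) -- a 2x3 sample becomes a flat vector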
pyerualjetwork/plan_cuda.py
CHANGED
@@ -120,22 +120,9 @@ def fit(
    if len(x_train) != len(y_train):
        raise ValueError("x_train and y_train must have the same length.")

-    if val and (x_val is None
+    if val and (x_val is None or y_val is None):
        x_val, y_val = x_train, y_train

-    elif val and (x_val is not None and y_val is not None):
-        x_val = cp.array(x_val, copy=False).astype(dtype, copy=False)
-
-        if len(y_val[0]) < 256:
-            if y_val.dtype != cp.uint8:
-                y_val = cp.array(y_val, copy=False).astype(cp.uint8, copy=False)
-        elif len(y_val[0]) <= 32767:
-            if y_val.dtype != cp.uint16:
-                y_val = cp.array(y_val, copy=False).astype(cp.uint16, copy=False)
-        else:
-            if y_val.dtype != cp.uint32:
-                y_val = cp.array(y_val, copy=False).astype(cp.uint32, copy=False)
-
    val_list = [] if val else None
    val_count = val_count or 10
    # Defining weights
@@ -146,7 +133,7 @@ def fit(

    # Training process
    for index, inp in enumerate(x_train):
-        inp = cp.array(inp).ravel()
+        inp = cp.array(inp, copy=False).ravel()
        y_decoded = decode_one_hot(y_train)
        # Weight updates
        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
@@ -232,7 +219,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
        tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.

    """
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)

    activation_potentiation = all_activations()

@@ -297,9 +284,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat

    # Initialize progress bar
    if batch_size == 1:
-        ncols =
+        ncols = 100
    else:
-        ncols =
+        ncols = 140
    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)

    # Initialize variables
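A hedged calling sketch for learner, built only from the signature fragment and the message above (the except_this keyword is taken from the printed recommendation; the toy data and remaining defaults are assumptions):

import cupy as cp
from pyerualjetwork import plan_cuda

x_train = cp.random.rand(200, 10).astype(cp.float32)
y_train = cp.eye(4, dtype=cp.uint8)[cp.random.randint(0, 4, 200)]

# skip the slow 'circular' activation during the search, as the message above recommends
model = plan_cuda.learner(x_train, y_train, strategy='accuracy', except_this=['circular'])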
pyerualjetwork/planeat.py
CHANGED
@@ -15,10 +15,10 @@ import random
from tqdm import tqdm

### LIBRARY IMPORTS ###
-from plan import feed_forward
-from data_operations import normalization
-from ui import loading_bars
-from activation_functions import apply_activation, all_activations
+from .plan import feed_forward
+from .data_operations import normalization
+from .ui import loading_bars
+from .activation_functions import apply_activation, all_activations

def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
    """
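Because these module-level imports are now package-relative, planeat no longer depends on the package directory being on sys.path. A minimal sketch of the import that the old absolute form could break when PyerualJetwork is installed as a wheel:

# Before this change, "from plan import feed_forward" inside planeat.py could fail with
# ModuleNotFoundError once the package was installed into site-packages (plan is not a
# top-level module there); the relative imports resolve against the package itself.
from pyerualjetwork import planeat          # imports cleanly as a submodule
from pyerualjetwork import planeat_cuda     # same fix; additionally requires cupy to be installed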
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -16,10 +16,10 @@ import random
from tqdm import tqdm

### LIBRARY IMPORTS ###
-from plan_cuda import feed_forward
-from data_operations_cuda import normalization
-from ui import loading_bars
-from activation_functions_cuda import apply_activation, all_activations
+from .plan_cuda import feed_forward
+from .data_operations_cuda import normalization
+from .ui import loading_bars
+from .activation_functions_cuda import apply_activation, all_activations

def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32):
    """
{pyerualjetwork-4.1.0.dist-info → pyerualjetwork-4.1.2b0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: pyerualjetwork
-Version: 4.1.0
+Version: 4.1.2b0
Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
Author: Hasan Can Beydili
Author-email: tchasancan@gmail.com
pyerualjetwork-4.1.2b0.dist-info/RECORD
ADDED
@@ -0,0 +1,23 @@
+pyerualjetwork/__init__.py,sha256=UyANALO6S5ruGG5ZiMuQ0cO8GZD1HZWENAPLkMsl0aE,2544
+pyerualjetwork/activation_functions.py,sha256=UeuuagJWcSoFfmwikDU7O8ph--oySnWDJNqKbEh4SlE,12043
+pyerualjetwork/activation_functions_cuda.py,sha256=6pdCthpiGmnMwRfuduGsvwN8cNv3UKNUPOjscxyAWqE,12075
+pyerualjetwork/data_operations.py,sha256=rnOYLLK3YnRdWpEsEQABU0RE950lQQI7971eBLBpqOQ,16536
+pyerualjetwork/data_operations_cuda.py,sha256=FBcZloHAyzuJnF2L1yJY1PS-3VJ8zlq5pgfxr8z0oKc,18768
+pyerualjetwork/help.py,sha256=pZs7hIhgFkovGLle97d9Qu9m5zKhMh7-OAIphIoSxBg,830
+pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+pyerualjetwork/metrics_cuda.py,sha256=TCwn5Z_4jQjqPCURX_xtcz9cjsYVzlahgKDA-qCgpU4,5072
+pyerualjetwork/model_operations.py,sha256=k_53BJladPm9fBWdlVpS6Uf5IQzpNlJWLH746DXGq_M,13036
+pyerualjetwork/model_operations_cuda.py,sha256=-_klhwLo3z3bLIm5LXjgXGW657R203ty-Po-5wDuJBM,13244
+pyerualjetwork/plan.py,sha256=-3v0PNNlxL1gx8CTMdvD7HgtJSZt8lEPh7hceIILdDk,34743
+pyerualjetwork/plan_cuda.py,sha256=bpI4HVMexL5WiGU30Nj1mzp8f9sOyxuDw7Ka7LqQR7g,33958
+pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
+pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
+pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
+pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
+pyerualjetwork/visualizations_cuda.py,sha256=dA0u85ZIyKqjtoSJ6p3EbEpJs4V4vS5W5ftR6eif8yg,26713
+pyerualjetwork-4.1.2b0.dist-info/METADATA,sha256=GRbK73NLWrSsX5io1gp1ZZCMjuqr7ofhPjED0z0ZijI,6359
+pyerualjetwork-4.1.2b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.2b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.2b0.dist-info/RECORD,,
pyerualjetwork-4.1.0.dist-info/RECORD
REMOVED
@@ -1,23 +0,0 @@
-pyerualjetwork/__init__.py,sha256=buQzAGP2zwBt10ji65TzcupjWYX70rSdlkPzRhmnlDk,2542
-pyerualjetwork/activation_functions.py,sha256=UeuuagJWcSoFfmwikDU7O8ph--oySnWDJNqKbEh4SlE,12043
-pyerualjetwork/activation_functions_cuda.py,sha256=5F49gKkiRngo0hAaS1KfarxQ7wEyub13WAX_apxf8j8,12069
-pyerualjetwork/data_operations.py,sha256=rnOYLLK3YnRdWpEsEQABU0RE950lQQI7971eBLBpqOQ,16536
-pyerualjetwork/data_operations_cuda.py,sha256=8jooTsRCC-pEYvtw8c6CsfUUnztDy8DI8-yLf9aX27A,17108
-pyerualjetwork/help.py,sha256=pZs7hIhgFkovGLle97d9Qu9m5zKhMh7-OAIphIoSxBg,830
-pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
-pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
-pyerualjetwork/metrics_cuda.py,sha256=TCwn5Z_4jQjqPCURX_xtcz9cjsYVzlahgKDA-qCgpU4,5072
-pyerualjetwork/model_operations.py,sha256=k_53BJladPm9fBWdlVpS6Uf5IQzpNlJWLH746DXGq_M,13036
-pyerualjetwork/model_operations_cuda.py,sha256=Guo0lFaaLiAXwKmnOi8Fz_bL_p38qR46CIhGOg_V1Sw,13138
-pyerualjetwork/plan.py,sha256=eHMYN-uzpzdwFnSsSuREOkG4vJdvoHZnRzJUQlcpBrc,34756
-pyerualjetwork/plan_cuda.py,sha256=y2TWyUUeyT7r04qxcRbCc42XfakPlMNG1BHSPK0afP4,34551
-pyerualjetwork/planeat.py,sha256=8cwWboJtXgFTKq6nFl1T9McbLDmBquKUr12y168PmcM,39513
-pyerualjetwork/planeat_cuda.py,sha256=boN-HFwm_D9cT1z0eAR8zgkiD_XOg-J2T2jNFvZweG4,39570
-pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
-pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
-pyerualjetwork/visualizations_cuda.py,sha256=dA0u85ZIyKqjtoSJ6p3EbEpJs4V4vS5W5ftR6eif8yg,26713
-pyerualjetwork-4.1.0.dist-info/METADATA,sha256=StjT-bsNr5C_PsyNauqHbCF4ZaL0JLNGbERMnmGF4lQ,6357
-pyerualjetwork-4.1.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-4.1.0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-4.1.0.dist-info/RECORD,,
{pyerualjetwork-4.1.0.dist-info → pyerualjetwork-4.1.2b0.dist-info}/WHEEL
File without changes
{pyerualjetwork-4.1.0.dist-info → pyerualjetwork-4.1.2b0.dist-info}/top_level.txt
File without changes