pyerualjetwork-4.8-py3-none-any.whl → pyerualjetwork-5-py3-none-any.whl
This diff compares publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
- pyerualjetwork/__init__.py +41 -1
- pyerualjetwork/data_operations.py +31 -1
- pyerualjetwork/data_operations_cuda.py +30 -0
- pyerualjetwork/{planeat.py → ene.py} +48 -31
- pyerualjetwork/{planeat_cuda.py → ene_cuda.py} +45 -28
- pyerualjetwork/help.py +2 -2
- pyerualjetwork/memory_operations.py +26 -0
- pyerualjetwork/model_operations.py +83 -38
- pyerualjetwork/model_operations_cuda.py +79 -33
- pyerualjetwork/{plan.py → neu.py} +67 -52
- pyerualjetwork/{plan_cuda.py → neu_cuda.py} +74 -59
- pyerualjetwork/visualizations.py +10 -10
- pyerualjetwork/visualizations_cuda.py +10 -10
- {pyerualjetwork-4.8.dist-info → pyerualjetwork-5.dist-info}/METADATA +19 -16
- pyerualjetwork-5.dist-info/RECORD +25 -0
- pyerualjetwork-4.8.dist-info/RECORD +0 -25
- {pyerualjetwork-4.8.dist-info → pyerualjetwork-5.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.8.dist-info → pyerualjetwork-5.dist-info}/top_level.txt +0 -0
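The module renames above (planeat → ene, plan → neu) change the public import paths. A minimal migration sketch, assuming the top-level package layout is otherwise unchanged (the exact public APIs of ene and neu are not shown in this diff):

```python
# Hypothetical migration sketch based only on the file renames listed above.

# PyerualJetwork 4.8:
# from pyerualjetwork import plan, planeat, model_operations

# PyerualJetwork 5:
from pyerualjetwork import neu, ene, model_operations
```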
pyerualjetwork/model_operations.py

@@ -1,3 +1,45 @@
+"""
+
+
+Model Operations
+================
+This module hosts functions for handling all operational processes related to models, including:
+
+- Saving and loading models
+- Making predictions from memory
+- Making predictions from storage
+- Retrieving model weights
+- Retrieving model activation functions
+- Retrieving model accuracy
+- Running the model in reverse (applicable to PLAN models)
+
+Module functions:
+-----------------
+- save_model()
+- load_model()
+- predict_from_storage()
+- predict_from_memory()
+- reverse_predict_from_storage()
+- reverse_predict_from_memory()
+- get_weights()
+- get_act()
+- get_preds()
+- get_preds_softmax()
+- get_acc()
+- get_scaler()
+- get_model_type()
+
+Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+- Author: Hasan Can Beydili
+- YouTube: https://www.youtube.com/@HasanCanBeydili
+- Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+- Instagram: https://www.instagram.com/canbeydilj
+- Contact: tchasancan@gmail.com
+"""
+
 import numpy as np
 from colorama import Fore, Style
 import sys

@@ -14,7 +56,7 @@ def save_model(model_name,
                scaler_params=None,
                test_acc=None,
                model_path='',
-
+               activations=['linear'],
                weights_type='npy',
                weights_format='raw',
                show_architecture=False,

@@ -36,7 +78,7 @@ def save_model(model_name,
 
     model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
 
-
+    activations: (list[str]): For deeper PLAN networks, activation function parameters. Or activation function parameters for MLP layers. For more information please run this code: neu.activations_list() default: ['linear']
 
     weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
 

@@ -55,12 +97,12 @@ def save_model(model_name,
     if model_type != 'PLAN' and model_type != 'MLP':
         raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")
 
-    if isinstance(
-
+    if isinstance(activations, str):
+        activations = [activations]
     else:
-
+        activations = [item if isinstance(item, list) else [item] for item in activations]
 
-    activation =
+    activation = activations.copy()
 
     if test_acc != None:
         test_acc= float(test_acc)
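The added normalization accepts either a single activation name or a per-layer list; a standalone toy illustration of its effect (no library code involved):

```python
# Toy illustration of the normalization introduced in the hunk above.
def normalize(activations):
    if isinstance(activations, str):
        return [activations]
    # wrap any non-list entry so each layer ends up with its own list
    return [item if isinstance(item, list) else [item] for item in activations]

print(normalize('tanh'))                # ['tanh']
print(normalize(['linear', 'tanh']))    # [['linear'], ['tanh']]
print(normalize([['linear', 'tanh']]))  # [['linear', 'tanh']]
```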
@@ -145,7 +187,7 @@ def save_model(model_name,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
             'STANDARD SCALER': scaler_params,
-            'ACTIVATION
+            'ACTIVATION FUNCTIONS': activation
             }
 
     df = pd.DataFrame(data)

@@ -242,9 +284,13 @@ def load_model(model_name,
 
         sys.exit()
 
-
-
-
+    try:
+        activations = list(df['ACTIVATION FUNCTIONS']) # for PyerualJetwork >=5 Versions.
+    except KeyError:
+        activations = list(df['ACTIVATION POTENTIATION']) # for PyerualJetwork <5 Versions.
+
+    activations = [x for x in activations if not (isinstance(x, float) and np.isnan(x))]
+    activations = [item for item in activations if item != '']
 
     scaler_params = df['STANDARD SCALER'].tolist()
 
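The try/except fallback keeps model files written by pre-5 releases loadable under the new column name. A self-contained sketch of the same pattern on a toy pandas DataFrame (column names from the diff, data invented for illustration):

```python
import numpy as np
import pandas as pd

# A model file written by PyerualJetwork < 5 only has the old column name.
df = pd.DataFrame({'ACTIVATION POTENTIATION': ['tanh', np.nan, '']})

try:
    activations = list(df['ACTIVATION FUNCTIONS'])     # PyerualJetwork >= 5
except KeyError:
    activations = list(df['ACTIVATION POTENTIATION'])  # PyerualJetwork < 5

# Padding cells come back as NaN or empty strings and are filtered out.
activations = [x for x in activations if not (isinstance(x, float) and np.isnan(x))]
activations = [x for x in activations if x != '']
print(activations)  # ['tanh']
```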
@@ -280,14 +326,14 @@ def load_model(model_name,
     if WeightType == 'mat':
         W = W['w']
 
-    return W, None, None,
+    return W, None, None, activations, scaler_params, None, model_type
 
 
 
-def
+def predict_from_storage(Input, model_name, model_path=''):
 
     """
-    Function to make a prediction
+    Function to make a prediction
     from storage
 
     Args:

@@ -296,8 +342,7 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
         model_name (str): Name of the model.
 
         model_path (str): Path of the model. Default: ''
-
-        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64, np.float16. (optional)
+
     Returns:
         ndarray: Output from the model.
     """

@@ -309,15 +354,15 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
 
         model = load_model(model_name, model_path)
 
-
+        activations = model[get_act()]
         scaler_params = model[get_scaler()]
         W = model[get_weights()]
         model_type = model[get_model_type()]
 
-        if isinstance(
-
-        elif isinstance(
-
+        if isinstance(activations, str):
+            activations = [activations]
+        elif isinstance(activations, list):
+            activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
 
         Input = standard_scaler(None, Input, scaler_params)
 

@@ -325,24 +370,24 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
 
             layer = Input
             for i in range(len(W)):
-                if i != len(W) - 1 and i != 0: layer = apply_activation(layer,
+                if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
                 layer = layer @ W[i].T
 
             return layer
 
         else:
 
-            Input = apply_activation(Input,
+            Input = apply_activation(Input, activations)
             result = Input @ W.T
 
             return result
 
     except:
-        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_storage." + Style.RESET_ALL)
         sys.exit()
 
 
-def
+def reverse_predict_from_storage(output, model_name, model_path=''):
 
     """
     reverse prediction function from storage

@@ -366,15 +411,15 @@ def reverse_predict_model_ssd(output, model_name, model_path=''):
         Input = W.T @ output
         return Input
     except:
-        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_storage." + Style.RESET_ALL)
         sys.exit()
 
 
 
-def
+def predict_from_memory(Input, W, scaler_params=None, activations=['linear'], is_mlp=False):
 
     """
-    Function to make a prediction
+    Function to make a prediction.
     from memory.
 
     Args:

@@ -384,7 +429,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
 
         scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
 
-
+        activations (list[str]): activation list for deep PLAN or activation list for MLP layers. Default: ['linear']
 
         is_mlp (bool, optional): Predict from PLAN model or MLP model ? Default: False (PLAN)
     Returns:

@@ -398,32 +443,32 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
 
         Input = standard_scaler(None, Input, scaler_params)
 
-        if isinstance(
-
-        elif isinstance(
-
+        if isinstance(activations, str):
+            activations = [activations]
+        elif isinstance(activations, list):
+            activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
 
         if is_mlp:
 
             layer = Input
             for i in range(len(W)):
-                if i != len(W) - 1 and i != 0: layer = apply_activation(layer,
+                if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
                 layer = layer @ W[i].T
 
             return layer
 
         else:
 
-            Input = apply_activation(Input,
+            Input = apply_activation(Input, activations)
             result = Input @ W.T
 
             return result
 
     except:
-        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_memory." + Style.RESET_ALL)
        sys.exit()
 
-def
+def reverse_predict_from_memory(output, W):
 
     """
     reverse prediction function from memory

@@ -443,7 +488,7 @@ def reverse_predict_model_ram(output, W):
         return Input
 
     except:
-        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_memory." + Style.RESET_ALL)
         sys.exit()
 
 
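For context on the reverse_* functions renamed above: the forward pass computes Input @ W.T and the reverse pass computes W.T @ output, mapping an output vector back toward input space. A toy NumPy shape check (values are illustrative):

```python
import numpy as np

W = np.random.rand(3, 5)        # (n_outputs, n_features), as used by Input @ W.T
x = np.random.rand(5)           # input-space vector
out = x @ W.T                   # forward pass: shape (3,)
back = W.T @ out                # reverse pass: back in input space, shape (5,)
print(out.shape, back.shape)    # (3,) (5,)
```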
@@ -462,7 +507,7 @@ def get_acc():
     return 2
 
 
-def
+def get_act():
 
     return 3
 
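Putting the renamed CPU API together, a hedged end-to-end sketch: the model name, path and input shape are placeholders, while the function names, the predict_from_memory signature and the index getters come from the diff above.

```python
import numpy as np
from pyerualjetwork import model_operations as mo

# Placeholder model name and path.
model = mo.load_model('my_model', 'models/')

W = model[mo.get_weights()]
activations = model[mo.get_act()]
scaler_params = model[mo.get_scaler()]

x = np.random.rand(1, 64)  # placeholder input
y = mo.predict_from_memory(x, W, scaler_params=scaler_params,
                           activations=activations)  # PLAN by default (is_mlp=False)

# Equivalent one-step call that loads the model itself:
y = mo.predict_from_storage(x, 'my_model', model_path='models/')
```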
pyerualjetwork/model_operations_cuda.py

@@ -1,3 +1,45 @@
+"""
+
+
+Model Operations on CUDA
+========================
+This module hosts functions for handling all operational processes related to models on CUDA GPU, including:
+
+- Saving and loading models
+- Making predictions from memory
+- Making predictions from storage
+- Retrieving model weights
+- Retrieving model activation functions
+- Retrieving model accuracy
+- Running the model in reverse (applicable to PLAN models)
+
+Module functions:
+-----------------
+- save_model()
+- load_model()
+- predict_from_storage()
+- predict_from_memory()
+- reverse_predict_from_storage()
+- reverse_predict_from_memory()
+- get_weights()
+- get_act()
+- get_preds()
+- get_preds_softmax()
+- get_acc()
+- get_scaler()
+- get_model_type()
+
+Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+- Author: Hasan Can Beydili
+- YouTube: https://www.youtube.com/@HasanCanBeydili
+- Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+- Instagram: https://www.instagram.com/canbeydilj
+- Contact: tchasancan@gmail.com
+"""
+
 import cupy as cp
 import numpy as np
 from colorama import Fore, Style

@@ -15,7 +57,7 @@ def save_model(model_name,
                scaler_params=None,
                test_acc=None,
                model_path='',
-
+               activations=['linear'],
                weights_type='npy',
                weights_format='raw',
                show_architecture=None,

@@ -39,7 +81,7 @@ def save_model(model_name,
 
     model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
 
-
+    activations: (list[str]): For deeper PLAN networks, activation function parameters. Or activation function parameters for MLP layers. For more information please run this code: plan.activations_list() default: ['linear']
 
     weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
 

@@ -65,12 +107,12 @@ def save_model(model_name,
     if isinstance(W, np.ndarray):
         W = np.copy(W)
 
-    if isinstance(
-
+    if isinstance(activations, str):
+        activations = [activations]
     else:
-
+        activations = [item if isinstance(item, list) else [item] for item in activations]
 
-    activation =
+    activation = activations.copy()
 
     if test_acc is not None:
         test_acc= float(test_acc)

@@ -161,7 +203,7 @@ def save_model(model_name,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
             'STANDARD SCALER': scaler_params,
-            'ACTIVATION
+            'ACTIVATION FUNCTIONS': activation
             }
 
     df = pd.DataFrame(data)

@@ -248,13 +290,17 @@ def load_model(model_name,
 
     except:
 
-        print(Fore.RED + "ERROR: Model Path error. acceptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + Style.RESET_ALL)
+        print(Fore.RED + "ERROR: Model Path or Model Name error. acceptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + Style.RESET_ALL)
 
         sys.exit()
 
-
-
-
+    try:
+        activations = list(df['ACTIVATION FUNCTIONS']) # for PyerualJetwork >=5 Versions.
+    except KeyError:
+        activations = list(df['ACTIVATION POTENTIATION']) # for PyerualJetwork <5 Versions.
+
+    activations = [x for x in activations if not (isinstance(x, float) and cp.isnan(x))]
+    activations = [item for item in activations if item != '']
 
     scaler_params_cpu = df['STANDARD SCALER'].tolist()
 

@@ -297,14 +343,14 @@ def load_model(model_name,
         W = W.tolist()
         W = [cp.array(item) for item in W]
 
-    return W, None, None,
+    return W, None, None, activations, scaler_params, None, model_type
 
 
 
-def
+def predict_from_storage(Input, model_name, model_path='', dtype=cp.float32):
 
     """
-    Function to make a prediction
+    Function to make a prediction
     from storage
 
     Args:

@@ -328,15 +374,15 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
 
         model = load_model(model_name, model_path)
 
-
+        activations = model[get_act()]
         scaler_params = model[get_scaler()]
         W = model[get_weights()]
         model_type = model[get_model_type()]
 
-        if isinstance(
-
-        elif isinstance(
-
+        if isinstance(activations, str):
+            activations = [activations]
+        elif isinstance(activations, list):
+            activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
 
         Input = standard_scaler(None, Input, scaler_params)
 

@@ -344,14 +390,14 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
 
             layer = Input
             for i in range(len(W)):
-                if i != len(W) - 1 and i != 0: layer = apply_activation(layer,
+                if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
                 layer = layer @ W[i].T
 
             return layer
 
         else:
 
-            Input = apply_activation(Input,
+            Input = apply_activation(Input, activations)
             result = Input @ W.T
 
             return result

@@ -362,7 +408,7 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
 
 
 
-def
+def reverse_predict_from_storage(output, model_name, model_path='', dtype=cp.float32):
 
     """
     reverse prediction function from storage

@@ -394,10 +440,10 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float3
         sys.exit()
 
 
-def
+def predict_from_memory(Input, W, scaler_params=None, activations=['linear'], is_mlp=False):
 
     """
-    Function to make a prediction
+    Function to make a prediction
     from memory.
 
     Args:

@@ -407,7 +453,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
 
         scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
 
-
+        activations (list[str]): activation list for deep PLAN or activation list for MLP layers. Default: ['linear']
 
         is_mlp (bool, optional): Predict from PLAN model or MLP model ? Default: False (PLAN)
     Returns:

@@ -419,10 +465,10 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
 
     try:
 
-        if isinstance(
-
-        elif isinstance(
-
+        if isinstance(activations, str):
+            activations = [activations]
+        elif isinstance(activations, list):
+            activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
 
         Input = standard_scaler(None, Input, scaler_params)
 

@@ -430,14 +476,14 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
 
             layer = Input
             for i in range(len(W)):
-                if i != len(W) - 1 and i != 0: layer = apply_activation(layer,
+                if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
                 layer = layer @ W[i].T
 
             return layer
 
         else:
 
-            Input = apply_activation(Input,
+            Input = apply_activation(Input, activations)
             result = Input @ W.T
 
             return result

@@ -447,7 +493,7 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
         sys.exit()
 
 
-def
+def reverse_predict_from_memory(output, W, dtype=cp.float32):
 
     """
     reverse prediction function from memory

@@ -490,7 +536,7 @@ def get_acc():
     return 2
 
 
-def
+def get_act():
 
     return 3
 
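The CUDA module mirrors the CPU API one-to-one, with weights and inputs held as CuPy arrays; the same hedged sketch with placeholder names:

```python
import cupy as cp
from pyerualjetwork import model_operations_cuda as moc

# Placeholder model name and path.
model = moc.load_model('my_model', 'models/')

W = model[moc.get_weights()]
activations = model[moc.get_act()]
scaler_params = model[moc.get_scaler()]

x = cp.random.rand(1, 64)  # placeholder GPU input
y = moc.predict_from_memory(x, W, scaler_params=scaler_params,
                            activations=activations)
```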