pyerualjetwork 5.37__py3-none-any.whl → 5.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,692 @@
+ """
+
+
+ Model Operations
+ =======================
+ This module hosts functions for handling all operational processes related to models, including:
+
+ - Saving and loading models
+ - Making predictions from memory
+ - Making predictions from storage
+ - Retrieving model weights
+ - Retrieving model activation functions
+ - Retrieving model accuracy
+ - Running the model in reverse (applicable to PLAN models)
+
+ Module functions:
+ -----------------
+ - get_model_template()
+ - save_model()
+ - load_model()
+ - predict_from_storage()
+ - predict_from_memory()
+ - reverse_predict_from_storage()
+ - reverse_predict_from_memory()
+
+ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+ - Creator: Hasan Can Beydili
+ - YouTube: https://www.youtube.com/@HasanCanBeydili
+ - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+ - Instagram: https://www.instagram.com/canbeydilj
+ - Contact: tchasancan@gmail.com
+ """
+
+ import numpy as np
+ import cupy as cp
+ from colorama import Fore, Style
+ from datetime import datetime
+ import pickle
+ from scipy import io
+ import scipy.io as sio
+ import pandas as pd
+ from collections import namedtuple
+
+ def get_model_template():
+     """Return the Model namedtuple template used by save_model, load_model and the predict functions."""
+
+     Model = namedtuple("Model", [
+         "weights",
+         "predictions",
+         "accuracy",
+         "activations",
+         "scaler_params",
+         "softmax_predictions",
+         "model_type",
+         "weights_type",
+         "weights_format",
+         "model_version",
+         "model_df",
+         "activation_potentiation"
+     ])
+
+     return Model
+
+
+ def save_model(model,
+                model_name,
+                model_path='',
+                weights_type='npy',
+                weights_format='raw',
+                show_architecture=False,
+                show_info=True
+                ):
+
+     """
+     Function to save a potentiation learning artificial neural network model.
+
+     Args:
+         model (tuple): Trained model.
+
+         model_name (str): Name of the model.
+
+         model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ Default: ''
+
+         weights_type (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). Default: 'npy'
+
+         weights_format (str): Format of the weights (options: 'f', 'raw'). Default: 'raw'
+
+         show_architecture (bool): Draws the model architecture. Default: False. Note: architecture drawing currently works only for PLAN models; MLP support is planned.
+
+         show_info (bool): Prints model details to the console. Default: True
+
+     Returns:
+         No return.
+     """
+
+     from .cpu.visualizations import draw_model_architecture
+     from . import __version__
+
+     model_type = model.model_type
+     activations = model.activations
+     activation_potentiation = model.activation_potentiation
+     scaler_params = model.scaler_params
+     W = model.weights
+     acc = model.accuracy
+
+     if model_type not in ('PLAN', 'MLP', 'PTNN'):
+         raise ValueError("model_type parameter must be 'PLAN', 'MLP' or 'PTNN'.")
+
+     if model_type == 'PTNN' and activation_potentiation == []:
+         raise ValueError('PTNN models need extra activation_potentiation parameter.')
+
+     if isinstance(activations, str):
+         activations = [activations]
+     else:
+         activations = [item if isinstance(item, list) else [item] for item in activations]
+
+     activations = activations.copy()
+
+     if model_type == 'PTNN':
+         if isinstance(activation_potentiation, str):
+             activation_potentiation = [activation_potentiation]
+         else:
+             activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]
+
+         activation_potentiation = activation_potentiation.copy()
+
+     if acc is not None:
+         acc = float(acc)
+
+     if weights_type not in ('txt', 'npy', 'mat', 'pkl'):
+         raise ValueError(Fore.RED + "ERROR: Save weight type (file extension) must be 'txt', 'npy', 'mat' or 'pkl' from: save_model" + Style.RESET_ALL)
+
+     if weights_format not in ('d', 'f', 'raw'):
+         raise ValueError(Fore.RED + "ERROR: Weight format must be 'd', 'f' or 'raw' from: save_model" + Style.RESET_ALL)
+
+     NeuronCount = []
+     SynapseCount = []
+
+     if model_type == 'PLAN':
+         class_count = W.shape[0]
+
+         try:
+             NeuronCount.append(np.shape(W)[1])
+             NeuronCount.append(np.shape(W)[0])
+             SynapseCount.append(np.shape(W)[0] * np.shape(W)[1])
+         except AttributeError as e:
+             raise AttributeError(Fore.RED + "ERROR: W does not have a shape attribute. Check if W is a valid matrix." + Style.RESET_ALL) from e
+         except IndexError as e:
+             raise IndexError(Fore.RED + "ERROR: W has an unexpected shape format. Ensure it has two dimensions." + Style.RESET_ALL) from e
+         except (TypeError, ValueError) as e:
+             raise TypeError(Fore.RED + "ERROR: W is not a valid numeric matrix." + Style.RESET_ALL) from e
+         except Exception as e:
+             raise RuntimeError(Fore.RED + f"ERROR: An unexpected error occurred in save_model: {e}" + Style.RESET_ALL) from e
+
+     elif model_type in ('MLP', 'PTNN'):
+
+         class_count = W[-1].shape[0]
+
+         NeuronCount.append(np.shape(W[0])[1])
+
+         for i in range(len(W)):
+             try:
+                 NeuronCount.append(np.shape(W[i])[0])
+                 SynapseCount.append(np.shape(W[i])[0] * np.shape(W[i])[1])
+             except AttributeError as e:
+                 raise AttributeError(Fore.RED + "ERROR: W does not have a shape attribute. Check if W is a valid matrix." + Style.RESET_ALL) from e
+             except IndexError as e:
+                 raise IndexError(Fore.RED + "ERROR: W has an unexpected shape format. Ensure it has two dimensions." + Style.RESET_ALL) from e
+             except (TypeError, ValueError) as e:
+                 raise TypeError(Fore.RED + "ERROR: W is not a valid numeric matrix." + Style.RESET_ALL) from e
+             except Exception as e:
+                 raise RuntimeError(Fore.RED + f"ERROR: An unexpected error occurred in save_model: {e}" + Style.RESET_ALL) from e
+
+
+         SynapseCount.append(' ')
+
+         activations.append('')
+         activations.insert(0, '')
+
+     if len(activations) == 1 and model_type == 'PLAN':
+         activations = [activations]
+         activations.append('')
+
+     if model_type == 'PTNN':
+         if len(activations) > len(activation_potentiation):
+             for i in range(len(activations) - len(activation_potentiation)):
+                 activation_potentiation.append('')
+
+         if len(activation_potentiation) > len(activations):
+             for i in range(len(activation_potentiation) - len(activations)):
+                 activations.append('')
+
+     if len(activations) > len(NeuronCount):
+         for i in range(len(activations) - len(NeuronCount)):
+             NeuronCount.append('')
+
+     if len(activations) > len(SynapseCount):
+         for i in range(len(activations) - len(SynapseCount)):
+             SynapseCount.append('')
+
+     if scaler_params is not None:
+
+         if len(scaler_params) > len(activations):
+
+             activations += ['']
+
+         elif len(activations) > len(scaler_params):
+
+             for i in range(len(activations) - len(scaler_params)):
+
+                 scaler_params.append(' ')
+
+     data = {'MODEL NAME': model_name,
+             'MODEL TYPE': model_type,
+             'CLASS COUNT': class_count,
+             'NEURON COUNT': NeuronCount,
+             'SYNAPSE COUNT': SynapseCount,
+             'VERSION': __version__,
+             'ACCURACY': acc,
+             'SAVE DATE': datetime.now(),
+             'WEIGHTS TYPE': weights_type,
+             'WEIGHTS FORMAT': weights_format,
+             'STANDARD SCALER': scaler_params,
+             'ACTIVATION FUNCTIONS': activations,
+             'ACTIVATION POTENTIATION': activation_potentiation
+             }
+
+     df = pd.DataFrame(data)
+     df.to_pickle(model_path + model_name + '.pkl')
+
+     try:
+
+         if weights_type == 'txt' and weights_format == 'f':
+
+             np.savetxt(model_path + model_name + '_weights.txt', W, fmt='%f')
+
+         if weights_type == 'txt' and weights_format == 'raw':
+
+             np.savetxt(model_path + model_name + '_weights.txt', W)
+
+         ###
+
+
+         if weights_type == 'pkl' and weights_format == 'f':
+
+             with open(model_path + model_name + '_weights.pkl', 'wb') as f:
+                 pickle.dump(W.astype(float), f)
+
+         if weights_type == 'pkl' and weights_format == 'raw':
+
+             with open(model_path + model_name + '_weights.pkl', 'wb') as f:
+                 pickle.dump(W, f)
+
+         ###
+
+         if weights_type == 'npy' and weights_format == 'f':
+
+             np.save(model_path + model_name + '_weights.npy', W.astype(float))
+
+         if weights_type == 'npy' and weights_format == 'raw':
+
+             np.save(model_path + model_name + '_weights.npy', W)
+
+         ###
+
+         if weights_type == 'mat' and weights_format == 'f':
+
+             w = {'w': W.astype(float)}
+             io.savemat(model_path + model_name + '_weights.mat', w)
+
+         if weights_type == 'mat' and weights_format == 'raw':
+
+             w = {'w': W}
+             io.savemat(model_path + model_name + '_weights.mat', w)
+
+
+     except OSError as e:
+         raise OSError(Fore.RED + f"ERROR: An OSError occurred in save_model while saving weights. Check the model name, path and permissions: {e}" + Style.RESET_ALL) from e
+
+     if show_info:
+         print(df)
+
+         message = (
+             Fore.GREEN + "Model Saved Successfully\n" +
+             Fore.MAGENTA + "Don't forget: to load the model, the model log file and the weight file must be in the same directory." +
+             Style.RESET_ALL
+         )
+
+         print(message)
+
+     if show_architecture:
+         draw_model_architecture(model_name=model_name, model_path=model_path)
+
+
+
+ def load_model(model_name,
+                model_path,
+                ):
+     """
+     Function to load a potentiation learning model.
+
+     Args:
+         model_name (str): Name of the model.
+
+         model_path (str): Path where the model is saved.
+
+     Returns:
+         tuple(model): (weights, None, accuracy, activations, scaler_params, None, model_type, weights_type, weights_format, device_version, model_df, activation_potentiation), where model_df is the Pandas DataFrame of the model.
+     """
+
+     from . import __version__
+
+     try:
+
+         df = pd.read_pickle(model_path + model_name + '.pkl')
+
+     except OSError as e:
+         raise OSError(Fore.RED + f"ERROR: An OSError occurred in load_model while loading model params. Check the model name, path and permissions: {e}" + Style.RESET_ALL) from e
+
+
+
+     scaler_params = df['STANDARD SCALER'].tolist()
+
+     try:
+         if scaler_params[0] is None:
+             scaler_params = scaler_params[0]
+
+     except:
+         scaler_params = [item for item in scaler_params if isinstance(item, np.ndarray)]
+
+
+     model_name = str(df['MODEL NAME'].iloc[0])
+     model_type = str(df['MODEL TYPE'].iloc[0])
+     WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+     WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
+     acc = str(df['ACCURACY'].iloc[0])
+
+     activations = list(df['ACTIVATION FUNCTIONS'])
+     activations = [x for x in activations if not (isinstance(x, float) and np.isnan(x))]
+     activations = [item for item in activations if item != '']
+
+     activation_potentiation = list(df['ACTIVATION POTENTIATION'])
+     activation_potentiation = [x for x in activation_potentiation if not (isinstance(x, float) and np.isnan(x))]
+     activation_potentiation = [item for item in activation_potentiation if item != '']
+
+     device_version = __version__
+
+     try:
+         model_version = str(df['VERSION'].iloc[0])
+         if model_version != device_version:
+             message = (
+                 Fore.MAGENTA + f"WARNING: Your PyerualJetwork version ({device_version}) is different from this model's version ({model_version}).\nIf you have a performance issue, please install this model's version. Use: pip install pyerualjetwork=={model_version} or see the issue_solver module." +
+                 Style.RESET_ALL
+             )
+             print(message)
+
+     except:
+         pass # Version check only in >= 5.0.2
+
+     if model_type in ('MLP', 'PTNN'): allow_pickle = True
+     else: allow_pickle = False
+
+     try:
+         if WeightType == 'txt':
+             W = np.loadtxt(model_path + model_name + '_weights.txt')
+         elif WeightType == 'npy':
+             W = np.load(model_path + model_name + '_weights.npy', allow_pickle=allow_pickle)
+         elif WeightType == 'mat':
+             W = sio.loadmat(model_path + model_name + '_weights.mat')
+         elif WeightType == 'pkl':
+             with open(model_path + model_name + '_weights.pkl', 'rb') as f:
+                 W = pickle.load(f)
+         else:
+
+             raise ValueError(
+                 Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
+
+     except OSError as e:
+         raise OSError(Fore.RED + f"ERROR: An OSError occurred in load_model while loading weights. Check the model name, path and permissions: {e}" + Style.RESET_ALL) from e
+
+
+     if WeightType == 'mat':
+         W = W['w']
+
+     Model = get_model_template()
+
+     model = Model(
+         W,
+         None,
+         acc,
+         activations,
+         scaler_params,
+         None,
+         model_type,
+         WeightType,
+         WeightFormat,
+         device_version,
+         df,
+         activation_potentiation
+     )
+
+     return model
+
+
+
+ def predict_from_storage(Input, model_name, cuda=False, model_path=''):
+
+     """
+     Function to make a prediction from storage.
+
+     Args:
+         Input (list or ndarray): Input data for the model.
+
+         model_name (str): Name of the model.
+
+         cuda (bool, optional): CUDA GPU acceleration? Default = False.
+
+         model_path (str): Path of the model. Default: ''
+
+     Returns:
+         ndarray: Output from the model.
+     """
+
+     if cuda:
+         if not isinstance(Input, cp.ndarray): Input = cp.array(Input)
+         from .cuda.activation_functions import apply_activation
+         from .cuda.data_ops import standard_scaler
+
+     else:
+         from .cpu.activation_functions import apply_activation
+         from .cpu.data_ops import standard_scaler
+
+     try:
+
+         model = load_model(model_name, model_path)
+
+         model_type = model.model_type
+         activations = model.activations
+         activation_potentiation = model.activation_potentiation
+         scaler_params = model.scaler_params
+         W = model.weights
+
+         if cuda and scaler_params is not None:
+             if not isinstance(scaler_params[0], cp.ndarray): scaler_params[0] = cp.array(scaler_params[0])
+             if not isinstance(scaler_params[1], cp.ndarray): scaler_params[1] = cp.array(scaler_params[1])
+
+         Input = standard_scaler(None, Input, scaler_params)
+
+         if isinstance(activations, str):
+             activations = [activations]
+         elif isinstance(activations, list):
+             activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
+
+         if model_type == 'MLP':
+             layer = Input
+             for i in range(len(W)):
+                 if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
+
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+
+             result = layer
+
+         if model_type == 'PLAN':
+
+             Input = apply_activation(Input, activations)
+             result = Input @ cp.array(W).T if cuda else Input @ W.T
+
+         if model_type == 'PTNN':
+
+             if isinstance(activation_potentiation, str):
+                 activation_potentiation = [activation_potentiation]
+             elif isinstance(activation_potentiation, list):
+                 activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
+
+             Input = apply_activation(Input, activation_potentiation)
+             layer = Input @ cp.array(W[0]).T if cuda else Input @ W[0].T
+
+             for i in range(1, len(W)):
+                 if i != len(W) - 1: layer = apply_activation(layer, activations[i])
+
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+
+             result = layer
+
+         return result
+
+     except Exception as e:
+         raise RuntimeError(Fore.RED + f"ERROR: An error occurred in predict_from_storage {e}" + Style.RESET_ALL) from e
+
+
+ def reverse_predict_from_storage(output, model_name, cuda=False, model_path=''):
+
+     """
+     Reverse prediction function from storage: reconstructs an input from an output layer.
+
+     Args:
+         output (list or ndarray): Output layer for the model.
+
+         model_name (str): Name of the model.
+
+         cuda (bool, optional): CUDA GPU acceleration? Default = False.
+
+         model_path (str): Path of the model. Default: ''
+
+     Returns:
+         ndarray: Reconstructed input for the model.
+     """
+
+     if cuda:
+         if not isinstance(output, cp.ndarray): output = cp.array(output)
+
+     model = load_model(model_name, model_path)
+
+     W = model.weights if not cuda else cp.array(model.weights)
+
+     try:
+         Input = W.T @ output
+         return Input
+     except Exception as e:
+         raise RuntimeError(Fore.RED + f"ERROR: An error occurred: {e}" + Style.RESET_ALL) from e
+
+
+
+ def predict_from_memory(Input, model, cuda=False):
+
+     """
+     Function to make a prediction from memory.
+
+     Args:
+         Input (list or ndarray): Input data for the model.
+
+         model (tuple): Trained model.
+
+         cuda (bool, optional): CUDA GPU acceleration? Default = False.
+
+     Returns:
+         ndarray: Output from the model.
+     """
+
+     model_type = model.model_type
+     activations = model.activations
+     activation_potentiation = model.activation_potentiation
+     scaler_params = model.scaler_params
+     W = model.weights
+
+     if not cuda:
+         from .cpu.data_ops import standard_scaler
+         from .cpu.activation_functions import apply_activation
+     else:
+         if scaler_params is not None:
+             if not isinstance(scaler_params[0], cp.ndarray): scaler_params[0] = cp.array(scaler_params[0])
+             if not isinstance(scaler_params[1], cp.ndarray): scaler_params[1] = cp.array(scaler_params[1])
+
+         if not isinstance(Input, cp.ndarray): Input = cp.array(Input)
+         from .cuda.data_ops import standard_scaler
+         from .cuda.activation_functions import apply_activation
+
+     if model_type not in ('PLAN', 'MLP', 'PTNN'): raise ValueError("model_type parameter must be 'PLAN', 'MLP' or 'PTNN'.")
+
+     try:
+
+         Input = standard_scaler(None, Input, scaler_params)
+
+         if isinstance(activations, str):
+             activations = [activations]
+         elif isinstance(activations, list):
+             activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]
+
+         if model_type == 'MLP':
+             layer = Input
+             for i in range(len(W)):
+                 if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
+
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+
+             result = layer
+
+         if model_type == 'PLAN':
+
+             Input = apply_activation(Input, activations)
+             result = Input @ cp.array(W).T if cuda else Input @ W.T
+
+         if model_type == 'PTNN':
+
+             if isinstance(activation_potentiation, str):
+                 activation_potentiation = [activation_potentiation]
+             elif isinstance(activation_potentiation, list):
+                 activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
+
+             Input = apply_activation(Input, activation_potentiation)
+             layer = Input @ cp.array(W[0]).T if cuda else Input @ W[0].T
+
+             for i in range(1, len(W)):
+                 if i != len(W) - 1: layer = apply_activation(layer, activations[i])
+
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+
+             result = layer
+
+         return result
+
+     except Exception as e:
+         raise RuntimeError(Fore.RED + f"ERROR: An error occurred in predict_from_memory {e}" + Style.RESET_ALL) from e
+
+ def reverse_predict_from_memory(output, W, cuda=False):
+
+     """
+     Reverse prediction function from memory: reconstructs an input from an output layer.
+
+     Args:
+         output (list or ndarray): Output layer for the model.
+
+         W (ndarray): Weights of the model.
+
+         cuda (bool, optional): CUDA GPU acceleration? Default = False.
+
+     Returns:
+         ndarray: Reconstructed input for the model.
+     """
+
+     try:
+         if cuda: W = cp.array(W)
+         Input = W.T @ output
+         return Input
+
+     except Exception as e:
+         raise RuntimeError(Fore.RED + f"ERROR: An error occurred: {e}" + Style.RESET_ALL) from e
+
+
+ # Index helpers: each function returns the position of the corresponding field
+ # in the Model namedtuple returned by get_model_template() / load_model().
+
+ def get_weights():
+
+     return 0
+
+
+ def get_preds():
+
+     return 1
+
+
+ def get_acc():
+
+     return 2
+
+
+ def get_act():
+
+     return 3
+
+
+ def get_scaler():
+
+     return 4
+
+
+ def get_preds_softmax():
+
+     return 5
+
+
+ def get_model_type():
+
+     return 6
+
+
+ def get_weights_type():
+
+     return 7
+
+
+ def get_weights_format():
+
+     return 8
+
+
+ def get_model_version():
+
+     return 9
+
+
+ def get_model_df():
+
+     return 10
+
+
+ def get_act_pot():
+
+     return 11
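
For orientation, a minimal usage sketch of the module's public functions follows. It is illustrative only: it assumes the module is importable as pyerualjetwork.model_ops, that 'tanh' is an available activation name, and it substitutes a hand-built stand-in model and random data for a real trained PLAN model, so details may need adjusting against the actual package.

# Illustrative sketch only: module path, file names, activation name and data are assumptions.
import numpy as np
from pyerualjetwork.model_ops import (get_model_template, save_model, load_model,
                                      predict_from_memory, reverse_predict_from_memory)

# Stand-in for a trained PLAN model; a real workflow would use PyerualJetwork's training output.
Model = get_model_template()
W = np.random.rand(3, 5)                                   # toy PLAN weights: 3 classes x 5 input features
model = Model(weights=W, predictions=None, accuracy=None, activations='tanh',
              scaler_params=None, softmax_predictions=None, model_type='PLAN',
              weights_type='npy', weights_format='raw', model_version=None,
              model_df=None, activation_potentiation=None)

save_model(model, model_name='demo_plan', model_path='', show_info=False)   # writes demo_plan.pkl + demo_plan_weights.npy
loaded = load_model('demo_plan', '')
print(loaded.model_type, loaded.weights.shape)

sample = np.random.rand(1, 5)
scores = predict_from_memory(sample, model)                # PLAN forward pass: activation(Input) @ W.T
print(int(np.argmax(scores)))

one_hot = np.zeros(3)
one_hot[0] = 1
reconstructed = reverse_predict_from_memory(one_hot, loaded.weights)        # reverse pass: W.T @ output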