pyerualjetwork 4.5.3__py3-none-any.whl → 4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +30 -1
- pyerualjetwork/data_operations_cuda.py +31 -1
- pyerualjetwork/model_operations.py +177 -101
- pyerualjetwork/model_operations_cuda.py +183 -105
- pyerualjetwork/planeat.py +167 -25
- pyerualjetwork/planeat_cuda.py +172 -30
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/RECORD +11 -11
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6.dist-info}/top_level.txt +0 -0
The hunks below cover pyerualjetwork/model_operations_cuda.py, the CUDA variant of the model save/load/predict module (cp is cupy).

@@ -11,8 +11,8 @@ import gc
 
 def save_model(model_name,
                W,
+               model_type,
                scaler_params=None,
-               model_type='PLAN',
                test_acc=None,
                model_path='',
                activation_potentiation=['linear'],
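The practical effect of this hunk is that `model_type` becomes the third positional parameter of `save_model` and loses its `'PLAN'` default, so callers that previously relied on the default must now pass it explicitly. Below is a minimal sketch of a 4.6-style call; the model name, weight shape, and use of keyword arguments are illustrative assumptions, not taken from the diff.

```python
import cupy as cp
from pyerualjetwork import model_operations_cuda as model_ops

# Illustrative stand-in for a trained PLAN weight matrix (class_count x features).
W = cp.random.rand(3, 10).astype(cp.float32)

# model_type is now required ('PLAN' or 'MLP'); keyword arguments sidestep
# the positional reordering shown in the hunk above.
model_ops.save_model(model_name='my_plan_model',   # placeholder name
                     W=W,
                     model_type='PLAN',
                     model_path='',                # save into the working directory
                     show_info=True)
```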
@@ -24,18 +24,29 @@ def save_model(model_name,
 
     """
 
-    Function to save a potentiation learning artificial neural network model.
+    Function to save a potentiation learning artificial neural network model or trained non-bias MLP model.
     Args:
         model_name: (str): Name of the model.
         W: Weights of the model.
+
+        model_type: (str): Type of the model. Options: 'PLAN', 'MLP'.
+
         scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
+
         model_type: (str): Type of the model. default: 'PLAN'
+
         test_acc: (float): Test accuracy of the model. default: None
+
         model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
-
+
+        activation_potentiation: (list[str]): For deeper PLAN networks, activation function parameters. Or activation function parameters for MLP layers. For more information please run this code: plan.activations_list() default: ['linear']
+
         weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+
         weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-
+
+        show_architecture: (bool): It draws model architecture. True or False. Default: False. NOTE! draw architecture only works for PLAN models. Not MLP models for now, but it will be.
+
         show_info: (bool): Prints model details into console. default: True
 
 
@@ -45,7 +56,8 @@ def save_model(model_name,
 
     from .visualizations_cuda import draw_model_architecture
 
-
+    if model_type != 'PLAN' and model_type != 'MLP':
+        raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")
 
     if test_acc is not None:
         test_acc= float(test_acc)
@@ -58,17 +70,45 @@ def save_model(model_name,
         print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + Style.RESET_ALL)
         sys.exit()
 
-
-
+    if model_type == 'PLAN':
+        class_count = W.shape[0]
 
+        NeuronCount = 0
+        SynapseCount = 0
 
-
-
-
-
+        try:
+            NeuronCount += cp.shape(W)[0] + cp.shape(W)[1]
+            SynapseCount += cp.shape(W)[0] * cp.shape(W)[1]
+        except:
 
-
-
+            print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+            sys.exit()
+
+    elif model_type == 'MLP':
+        class_count = W[-1].shape[0]
+
+        NeuronCount = []
+        SynapseCount = []
+
+        NeuronCount.append(cp.shape(W[0])[1])
+
+        for i in range(len(W)):
+            try:
+                NeuronCount.append(cp.shape(W[i])[0])
+                SynapseCount.append(cp.shape(W[i])[0] * cp.shape(W[i])[1])
+            except:
+
+                print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+                sys.exit()
+
+        SynapseCount.append(' ')
+
+        activation_potentiation.append('')
+        activation_potentiation.insert(0, '')
+
+    if isinstance(activation_potentiation, str):
+        activation_potentiation = [activation_potentiation]
+        activation_potentiation.append('')
 
     if scaler_params != None:
 
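For 'PLAN' the single weight matrix is summarized directly (neurons as rows plus columns, synapses as rows times columns), while for 'MLP' the weight list is walked layer by layer and padding entries ('' and ' ') are appended so the columns of the saved DataFrame line up. A small NumPy sketch of the same bookkeeping, with made-up layer shapes (the CUDA module does this with cupy):

```python
import numpy as np

# Hypothetical MLP weight list for a 10 -> 32 -> 16 -> 3 network; each W[i] is (out, in).
W = [np.zeros((32, 10)), np.zeros((16, 32)), np.zeros((3, 16))]

neuron_count = [W[0].shape[1]]                     # input width: 10
synapse_count = []
for w in W:
    neuron_count.append(w.shape[0])                # 32, 16, 3
    synapse_count.append(w.shape[0] * w.shape[1])  # 320, 512, 48

class_count = W[-1].shape[0]                       # 3 output classes
print(neuron_count, synapse_count, class_count)    # [10, 32, 16, 3] [320, 512, 48] 3
```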
@@ -102,56 +142,63 @@ def save_model(model_name,
     df = pd.DataFrame(data)
     df.to_pickle(model_path + model_name + '.pkl')
 
+    if isinstance(W, list): max_file = len(W)
+    else: max_file = 1
 
-
+    for i in range(max_file):
 
-        if
+        if max_file > 1: weight = W[i]
+        else: weight = W
 
-
+        try:
 
-
+            if weights_type == 'txt' and weights_format == 'f':
 
-
+                cp.savetxt(model_path + model_name + f'weights{i}.txt', weight, fmt='%f')
 
-
+            if weights_type == 'txt' and weights_format == 'raw':
 
-
-        if weights_type == 'pkl' and weights_format == 'f':
+                cp.savetxt(model_path + model_name + f'weights{i}.txt', weight)
 
-
-            pickle.dump(W.astype(float), f)
+            ###
 
-
-
-        with open(model_path + model_name + '_weights.pkl', 'wb') as f:
-            pickle.dump(W, f)
+
+            if weights_type == 'pkl' and weights_format == 'f':
 
-
+                with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                    pickle.dump(weight.astype(float), f)
 
-
+            if weights_type == 'pkl' and weights_format =='raw':
+
+                with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                    pickle.dump(weight, f)
 
-
+            ###
 
-
+            if weights_type == 'npy' and weights_format == 'f':
 
-
+                cp.save(model_path + model_name + f'weights{i}.npy', weight, weight.astype(float))
 
-
+            if weights_type == 'npy' and weights_format == 'raw':
 
-
+                cp.save(model_path + model_name + f'weights{i}.npy', weight)
 
-
-            io.savemat(model_path + model_name + '_weights.mat', w)
+            ###
 
-
-
-            w = {'w': W}
-            io.savemat(model_path + model_name + '_weights.mat', w)
+            if weights_type == 'mat' and weights_format == 'f':
 
-
+                w = {'w': weight.astype(float)}
+                io.savemat(model_path + model_name + f'weights{i}.mat', w)
 
-
-
+            if weights_type == 'mat' and weights_format == 'raw':
+
+                w = {'w': weight}
+                io.savemat(model_path + model_name + f'weights{i}.mat', w)
+
+        except:
+
+            print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + Style.RESET_ALL)
+            sys.exit()
 
     if show_info:
         print(df)
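The saver now loops over `max_file` and writes one weight file per layer, named `<model_name>weights<i>.<ext>` (no separator before "weights"), instead of the single `<model_name>_weights.<ext>` file that 4.5.3 produced. For a PLAN model `max_file` is 1, so only `weights0` appears. A quick sketch of the filenames you would expect for a hypothetical three-layer MLP saved with `weights_type='npy'`:

```python
# Placeholder values; only the naming pattern is taken from the hunk above.
model_path, model_name, weights_type = '', 'my_mlp_model', 'npy'
num_layers = 3   # len(W) for the MLP weight list

expected_files = [f"{model_path}{model_name}weights{i}.{weights_type}"
                  for i in range(num_layers)]
print(expected_files)
# ['my_mlp_modelweights0.npy', 'my_mlp_modelweights1.npy', 'my_mlp_modelweights2.npy']
```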
@@ -168,7 +215,6 @@ def save_model(model_name,
         draw_model_architecture(model_name=model_name, model_path=model_path)
 
 
-
 def load_model(model_name,
                model_path,
                ):
@@ -176,14 +222,14 @@ def load_model(model_name,
     Function to load a potentiation learning model.
 
     Args:
-
     model_name (str): Name of the model.
-
+
     model_path (str): Path where the model is saved.
 
     Returns:
-    lists:
+    lists: (list[df_elements]), DataFrame of the model
     """
+
    try:
 
        df = pd.read_pickle(model_path + model_name + '.pkl')
@@ -196,7 +242,7 @@ def load_model(model_name,
 
     activation_potentiation = list(df['ACTIVATION POTENTIATION'])
     activation_potentiation = [x for x in activation_potentiation if not (isinstance(x, float) and cp.isnan(x))]
-    activation_potentiation = [item for item in activation_potentiation if item != '']
+    activation_potentiation = [item for item in activation_potentiation if item != '']
 
     scaler_params_cpu = df['STANDARD SCALER'].tolist()
 
@@ -212,26 +258,38 @@ def load_model(model_name,
     gc.collect()
 
     model_name = str(df['MODEL NAME'].iloc[0])
+    model_type = str(df['MODEL TYPE'].iloc[0])
     WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+    layers = list(df['NEURON COUNT'])
 
-    if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if model_type == 'MLP': max_file = len(layers)-1
+    else: max_file = 1
+
+    W = []
+
+    for i in range(max_file):
+
+        if WeightType == 'txt':
+            W.append(cp.loadtxt(model_path + model_name + f'weights{i}.txt'))
+        elif WeightType == 'npy':
+            W.append(cp.load(model_path + model_name + f'weights{i}.npy'))
+        elif WeightType == 'mat':
+            W.append(sio.loadmat(model_path + model_name + f'weights{i}.mat'))
+        elif WeightType == 'pkl':
+            with open(model_path + model_name + f'weights{i}.pkl', 'rb') as f:
+                W = pickle.load(f)
+        else:
+
+            raise ValueError(
+                Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
+
+        if WeightType == 'mat':
+            W[-1] = W[-1]['w']
+
+    if model_type == 'PLAN': W = cp.array(W[0], dtype=W[0].dtype)
+
+    return W, None, None, activation_potentiation, scaler_params, None, model_type
 
-    return W, None, None, activation_potentiation, scaler_params
 
 
 def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
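`load_model` now reads the MODEL TYPE and NEURON COUNT columns, loads one weight file per MLP layer (unwrapping the `'w'` key for `.mat` files), and returns a seven-element tuple whose last element is the model type; 4.5.3 returned five elements. A sketch of unpacking that tuple, based only on the return statement in the hunk (the model name and path are placeholders):

```python
from pyerualjetwork.model_operations_cuda import load_model

# Positions follow the new return statement:
# W, None, None, activation_potentiation, scaler_params, None, model_type
W, _, _, activation_potentiation, scaler_params, _, model_type = load_model(
    'my_plan_model', model_path='')

# For 'PLAN', W is a single cupy array; for 'MLP', it is a list with one array per layer.
print(model_type)
```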
@@ -241,17 +299,15 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     from storage
 
     Args:
-
-
-
+    Input (list or ndarray or cupyarray): Input data for the model (single vector or single matrix).
+
     model_name (str): Name of the model.
-
+
     model_path (str): Path of the model. Default: ''
-
-    dtype (cupy.dtype): Data type for the arrays.
-
+
+    dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64, cp.float16. (optional)
     Returns:
-
+    cupyarray: Output from the model.
     """
 
     Input = cp.array(Input, dtype=dtype, copy=False)
@@ -259,25 +315,37 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     from .activation_functions_cuda import apply_activation
     from .data_operations_cuda import standard_scaler
 
-
-
-    activation_potentiation = model[get_act_pot()]
-    scaler_params = model[get_scaler()]
-    W = model[get_weights()]
+    try:
 
-
+        model = load_model(model_name, model_path)
+
+        activation_potentiation = model[get_act_pot()]
+        scaler_params = model[get_scaler()]
+        W = model[get_weights()]
+        model_type = model[get_model_type()]
 
-
-    Input = Input.ravel()
+        Input = standard_scaler(None, Input, scaler_params)
 
-
-
-
-
+        if model_type == 'MLP':
+
+            layer = Input
+            for i in range(len(W)):
+                if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                layer = layer @ W[i].T
+
+            return layer
+
+        else:
+
+            Input = apply_activation(Input, activation_potentiation)
+            result = Input @ W.T
+
+            return result
 
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
-        sys.exit()
+        sys.exit(),
+
 
 
 def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float32):
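In the new MLP branch the running `layer` is multiplied by `W[i].T` at every step, and the i-th activation is applied to it before every matmul except the last one; the PLAN branch keeps the old behaviour (one `apply_activation` call, one matmul). A NumPy sketch of the same loop with a no-op in place of `apply_activation`, using made-up shapes, just to make the shape flow explicit:

```python
import numpy as np

def identity(x, _act):
    return x   # stand-in for apply_activation in this sketch

# Hypothetical 4 -> 8 -> 5 -> 3 MLP; each W[i] has shape (out, in).
W = [np.random.rand(8, 4), np.random.rand(5, 8), np.random.rand(3, 5)]
activations = ['relu', 'relu', 'linear']   # illustrative per-layer names

layer = np.random.rand(4)                  # single input vector
for i in range(len(W)):
    if i != len(W) - 1:                    # activation is skipped before the final matmul
        layer = identity(layer, activations[i])
    layer = layer @ W[i].T                 # (in,) @ (in, out) -> (out,)

print(layer.shape)                         # (3,), one score per output class
```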
@@ -312,47 +380,54 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float32):
         sys.exit()
 
 
-def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'],
+def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], is_mlp=False):
 
     """
     Function to make a prediction using a potentiation learning artificial neural network (PLAN).
     from memory.
 
     Args:
-
     Input (list or ndarray): Input data for the model (single vector or single matrix).
-
+
     W (list of ndarrays): Weights of the model.
-
+
     scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
 
-    activation_potentiation (list):
-
-
-
+    activation_potentiation (list[str]): activation list for deep PLAN or activation list for MLP layers. Default: ['linear']
+
+    is_mlp (bool, optional): Predict from PLAN model or MLP model ? Default: False (PLAN)
     Returns:
-
+    cupyarray: Output from the model.
     """
 
     from .data_operations_cuda import standard_scaler
     from .activation_functions_cuda import apply_activation
 
-    Input = standard_scaler(None, Input, scaler_params)
-
-    Input = cp.array(Input, dtype=dtype, copy=False)
-    Input = Input.ravel()
-
     try:
+
+        Input = standard_scaler(None, Input, scaler_params)
 
-
-
+        if is_mlp:
+
+            layer = Input
+            for i in range(len(W)):
+                if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                layer = layer @ W[i].T
+
+            return layer
 
-
-
+        else:
+
+            Input = apply_activation(Input, activation_potentiation)
+            result = Input @ W.T
+
+            return result
+
     except:
         print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + Style.RESET_ALL)
         sys.exit()
 
+
 def reverse_predict_model_ram(output, W, dtype=cp.float32):
 
     """
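`predict_model_ram` now selects between the two forward passes with the new `is_mlp` flag and no longer casts or ravels the input itself (the old `dtype`/`ravel` lines are removed); the input is only passed through `standard_scaler` before use. A hedged usage sketch with made-up weights and input:

```python
import cupy as cp
from pyerualjetwork.model_operations_cuda import predict_model_ram

x = cp.random.rand(10).astype(cp.float32)          # illustrative input vector

# PLAN: a single weight matrix, is_mlp left at its default (False).
W_plan = cp.random.rand(3, 10).astype(cp.float32)
out_plan = predict_model_ram(x, W_plan, activation_potentiation=['linear'])

# MLP: a list of per-layer matrices plus the new flag.
W_mlp = [cp.random.rand(16, 10).astype(cp.float32),
         cp.random.rand(3, 16).astype(cp.float32)]
out_mlp = predict_model_ram(x, W_mlp,
                            activation_potentiation=['linear', 'linear'],
                            is_mlp=True)
```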
@@ -407,4 +482,7 @@ def get_scaler():
 
 def get_preds_softmax():
 
-    return 5
+    return 5
+
+def get_model_type():
+    return 6