pyerualjetwork 4.5.3__py3-none-any.whl → 4.6b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/data_operations.py +30 -1
- pyerualjetwork/data_operations_cuda.py +31 -1
- pyerualjetwork/model_operations.py +171 -101
- pyerualjetwork/model_operations_cuda.py +193 -112
- pyerualjetwork/planeat.py +165 -25
- pyerualjetwork/planeat_cuda.py +170 -30
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/RECORD +11 -11
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.5.3.dist-info → pyerualjetwork-4.6b0.dist-info}/top_level.txt +0 -0
pyerualjetwork/model_operations_cuda.py +193 -112

@@ -11,8 +11,8 @@ import gc
 
 def save_model(model_name,
                W,
+               model_type,
                scaler_params=None,
-               model_type='PLAN',
                test_acc=None,
                model_path='',
                activation_potentiation=['linear'],
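With this change, model_type becomes the third positional parameter of save_model instead of a keyword defaulting to 'PLAN'. A minimal call sketch against the new signature (model names, paths, shapes, and the 'tanh' activation are illustrative placeholders, not values from the package; CuPy is assumed to be installed):

    import cupy as cp
    from pyerualjetwork import model_operations_cuda as model_ops

    # PLAN model: a single weight matrix of shape (classes, features)
    W_plan = cp.random.rand(10, 784)
    model_ops.save_model('example_plan', W_plan, 'PLAN', model_path='models/')

    # Non-bias MLP model: a list of per-layer matrices of shape (out, in)
    W_mlp = [cp.random.rand(64, 784), cp.random.rand(10, 64)]
    model_ops.save_model('example_mlp', W_mlp, 'MLP',
                         activation_potentiation=['tanh', 'linear'],  # placeholder names; check plan.activations_list()
                         model_path='models/')

Passing anything other than 'PLAN' or 'MLP' raises a ValueError, as the validation added further down shows.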
@@ -24,18 +24,29 @@ def save_model(model_name,
 
     """
 
-    Function to save a potentiation learning artificial neural network model.
+    Function to save a potentiation learning artificial neural network model or trained non-bias MLP model.
     Args:
         model_name: (str): Name of the model.
         W: Weights of the model.
+
+        model_type: (str): Type of the model. Options: 'PLAN', 'MLP'.
+
         scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
+
         model_type: (str): Type of the model. default: 'PLAN'
+
         test_acc: (float): Test accuracy of the model. default: None
+
         model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
-
+
+        activation_potentiation: (list): For deeper PLAN networks, activation function parameters. Or activation functions for MLP model layers. For more information please run this code: plan.activations_list() default: ['linear']
+
         weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+
         weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-
+
+        show_architecture: (bool): It draws model architecture. True or False. Default: False. NOTE! draw architecture only works for PLAN models. Not MLP models for now, but it will be.
+
         show_info: (bool): Prints model details into console. default: True
 
 
@@ -45,7 +56,8 @@ def save_model(model_name,
 
     from .visualizations_cuda import draw_model_architecture
 
-
+    if model_type != 'PLAN' and model_type != 'MLP':
+        raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")
 
     if test_acc is not None:
         test_acc= float(test_acc)
@@ -58,17 +70,41 @@ def save_model(model_name,
         print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + Style.RESET_ALL)
         sys.exit()
 
-
-
+    if model_type == 'PLAN':
+        class_count = W.shape[0]
 
+        NeuronCount = 0
+        SynapseCount = 0
 
-
-
-
-
+        try:
+            NeuronCount += cp.shape(W)[0] + cp.shape(W)[1]
+            SynapseCount += cp.shape(W)[0] * cp.shape(W)[1]
+        except:
 
-
-
+            print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+            sys.exit()
+
+    elif model_type == 'MLP':
+        class_count = W[-1].shape[0]
+
+        NeuronCount = []
+        SynapseCount = []
+
+        NeuronCount.append(cp.shape(W[0])[1])
+
+        for i in range(len(W)):
+            try:
+                NeuronCount.append(cp.shape(W[i])[0])
+                SynapseCount.append(cp.shape(W[i])[0] * cp.shape(W[i])[1])
+            except:
+
+                print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+                sys.exit()
+
+        SynapseCount.append(' ')
+
+        activation_potentiation.append('')
+        activation_potentiation.insert(0, '')
 
     if scaler_params != None:
 
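The bookkeeping above differs by model type: a PLAN model has one weight matrix and gets scalar neuron/synapse totals, while an MLP gets one entry per layer, with the input width recorded first. The same arithmetic in a plain NumPy sketch (NumPy is used here only to make the shapes concrete; the module itself operates on CuPy arrays):

    import numpy as np

    # PLAN: a single (classes, features) matrix
    W = np.zeros((10, 784))
    neuron_count = W.shape[0] + W.shape[1]     # 10 output neurons + 784 inputs = 794
    synapse_count = W.shape[0] * W.shape[1]    # 7840 connections

    # MLP: a list of (out, in) matrices, one per layer
    W_list = [np.zeros((64, 784)), np.zeros((10, 64))]
    neuron_counts = [W_list[0].shape[1]]                         # input width first: 784
    neuron_counts += [w.shape[0] for w in W_list]                # then each layer's outputs: 64, 10
    synapse_counts = [w.shape[0] * w.shape[1] for w in W_list]   # 50176, 640

    print(neuron_counts, synapse_counts)   # [784, 64, 10] [50176, 640]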
@@ -102,56 +138,63 @@ def save_model(model_name,
     df = pd.DataFrame(data)
     df.to_pickle(model_path + model_name + '.pkl')
 
+    if isinstance(W, list): max_file = len(W)
+    else: max_file = 1
 
-
+    for i in range(max_file):
 
-        if
+        if max_file > 1: weight = W[i]
+        else: weight = W
 
-
+        try:
 
-
+            if weights_type == 'txt' and weights_format == 'f':
 
-
+                cp.savetxt(model_path + model_name + f'weights{i}.txt', weight, fmt='%f')
 
-
+            if weights_type == 'txt' and weights_format == 'raw':
 
-
-        if weights_type == 'pkl' and weights_format == 'f':
+                cp.savetxt(model_path + model_name + f'weights{i}.txt', weight)
 
-
-            pickle.dump(W.astype(float), f)
+            ###
 
-
-
-        with open(model_path + model_name + '_weights.pkl', 'wb') as f:
-            pickle.dump(W, f)
+
+            if weights_type == 'pkl' and weights_format == 'f':
 
-
+                with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                    pickle.dump(weight.astype(float), f)
 
-
+            if weights_type == 'pkl' and weights_format =='raw':
+
+                with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                    pickle.dump(weight, f)
 
-
+            ###
 
-
+            if weights_type == 'npy' and weights_format == 'f':
 
-
+                cp.save(model_path + model_name + f'weights{i}.npy', weight, weight.astype(float))
 
-
+            if weights_type == 'npy' and weights_format == 'raw':
 
-
+                cp.save(model_path + model_name + f'weights{i}.npy', weight)
 
-
-        io.savemat(model_path + model_name + '_weights.mat', w)
+            ###
 
-
-
-        w = {'w': W}
-        io.savemat(model_path + model_name + '_weights.mat', w)
+            if weights_type == 'mat' and weights_format == 'f':
 
-
+                w = {'w': weight.astype(float)}
+                io.savemat(model_path + model_name + f'weights{i}.mat', w)
 
-
-
+            if weights_type == 'mat' and weights_format == 'raw':
+
+                w = {'w': weight}
+                io.savemat(model_path + model_name + f'weights{i}.mat', w)
+
+        except:
+
+            print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + Style.RESET_ALL)
+            sys.exit()
 
     if show_info:
         print(df)
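Because the weights may now be a list, the save loop writes one weight file per layer, appending the f'weights{i}' suffix directly onto the model name (no separator) in whichever format was requested. A small sketch of the filenames this implies for a hypothetical two-layer MLP saved as .npy (names and path are placeholders):

    model_name, model_path = 'example_mlp', 'models/'

    # Mirrors the f'weights{i}' naming used above for a two-matrix weight list
    weight_files = [model_path + model_name + f'weights{i}.npy' for i in range(2)]
    metadata_file = model_path + model_name + '.pkl'   # DataFrame with the model details

    print(metadata_file)   # models/example_mlp.pkl
    print(weight_files)    # ['models/example_mlpweights0.npy', 'models/example_mlpweights1.npy']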
@@ -168,7 +211,6 @@ def save_model(model_name,
         draw_model_architecture(model_name=model_name, model_path=model_path)
 
 
-
 def load_model(model_name,
                model_path,
               ):
@@ -176,14 +218,14 @@ def load_model(model_name,
     Function to load a potentiation learning model.
 
     Args:
-
     model_name (str): Name of the model.
-
+
     model_path (str): Path where the model is saved.
 
     Returns:
-    lists:
+    lists: (list[df_elements]), DataFrame of the model
     """
+
     try:
 
         df = pd.read_pickle(model_path + model_name + '.pkl')
@@ -196,7 +238,7 @@ def load_model(model_name,
 
     activation_potentiation = list(df['ACTIVATION POTENTIATION'])
     activation_potentiation = [x for x in activation_potentiation if not (isinstance(x, float) and cp.isnan(x))]
-    activation_potentiation = [item for item in activation_potentiation if item != '']
+    activation_potentiation = [item for item in activation_potentiation if item != '']
 
     scaler_params_cpu = df['STANDARD SCALER'].tolist()
 
@@ -212,26 +254,38 @@ def load_model(model_name,
     gc.collect()
 
     model_name = str(df['MODEL NAME'].iloc[0])
+    model_type = str(df['MODEL TYPE'].iloc[0])
     WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+    layers = list(df['NEURON COUNT'])
 
-    if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if model_type == 'MLP': max_file = len(layers)-1
+    else: max_file = 1
+
+    W = []
+
+    for i in range(max_file):
+
+        if WeightType == 'txt':
+            W.append(cp.loadtxt(model_path + model_name + f'weights{i}.txt'))
+        elif WeightType == 'npy':
+            W.append(cp.load(model_path + model_name + f'weights{i}.npy'))
+        elif WeightType == 'mat':
+            W.append(sio.loadmat(model_path + model_name + f'weights{i}.mat'))
+        elif WeightType == 'pkl':
+            with open(model_path + model_name + f'weights{i}.pkl', 'rb') as f:
+                W = pickle.load(f)
+        else:
+
+            raise ValueError(
+                Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
+
+        if WeightType == 'mat':
+            W[-1] = W[-1]['w']
+
+    if model_type == 'PLAN': W = cp.array(W[0], dtype=W[0])
+
+    return W, None, None, activation_potentiation, scaler_params, None, model_type
 
-    return W, None, None, activation_potentiation, scaler_params
 
 
 def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
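load_model now also reads MODEL TYPE and NEURON COUNT from the saved DataFrame, loads one weight file per layer for MLP models, and returns a 7-tuple ending in the model type: (W, None, None, activation_potentiation, scaler_params, None, model_type). A hedged unpacking sketch using the module's index helpers (get_weights and get_act_pot are assumed to be the existing getters alongside get_scaler and the new get_model_type; the model name and path are placeholders):

    from pyerualjetwork import model_operations_cuda as model_ops

    model = model_ops.load_model('example_mlp', 'models/')

    W = model[model_ops.get_weights()]              # weight matrix (PLAN) or list of matrices (MLP)
    activations = model[model_ops.get_act_pot()]    # activation_potentiation list
    scaler_params = model[model_ops.get_scaler()]   # standard scaler params, or None
    model_type = model[model_ops.get_model_type()]  # index 6: 'PLAN' or 'MLP'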
@@ -241,17 +295,15 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     from storage
 
     Args:
-
-
-
+        Input (list or ndarray or cupyarray): Input data for the model (single vector or single matrix).
+
         model_name (str): Name of the model.
-
+
         model_path (str): Path of the model. Default: ''
-
-        dtype (cupy.dtype): Data type for the arrays.
-
+
+        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64, cp.float16. (optional)
     Returns:
-
+        cupyarray: Output from the model.
     """
 
     Input = cp.array(Input, dtype=dtype, copy=False)
@@ -259,25 +311,37 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     from .activation_functions_cuda import apply_activation
     from .data_operations_cuda import standard_scaler
 
-
-
-
-
-
+    try:
+
+        model = load_model(model_name, model_path)
+
+        activation_potentiation = model[get_act_pot()]
+        scaler_params = model[get_scaler()]
+        W = model[get_weights()]
+        model_type = model[get_model_type()]
 
-
+        Input = standard_scaler(None, Input, scaler_params)
 
-
-
+        if model_type == 'MLP':
+
+            layer = Input
+            for i in range(len(W)):
+                if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                layer = layer @ W[i].T
+
+            return layer
 
-
-
-
-
+        else:
+
+            Input = apply_activation(Input, activation_potentiation)
+            result = Input @ W.T
+
+            return result
 
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
-        sys.exit()
+        sys.exit(),
+
 
 
 def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float32):
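For MLP models the prediction path is a plain forward pass over the weight list: the layer's activation is applied before every matrix product except the last one, and each product is layer @ W[i].T with weights stored as (out, in). A NumPy sketch of the same loop, with a pass-through activation standing in for apply_activation:

    import numpy as np

    def mlp_forward(x, W, activations, apply_activation=lambda v, act: v):
        # Mirrors the MLP branch of predict_model_ssd: no activation before the
        # final product, weights stored as (out_features, in_features).
        layer = x
        for i in range(len(W)):
            if i != len(W) - 1:
                layer = apply_activation(layer, activations[i])
            layer = layer @ W[i].T
        return layer

    x = np.random.rand(784)
    W = [np.random.rand(64, 784), np.random.rand(10, 64)]
    print(mlp_forward(x, W, ['linear', 'linear']).shape)   # (10,)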
@@ -312,47 +376,61 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float3
         sys.exit()
 
 
-def
+def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
 
     """
     Function to make a prediction using a potentiation learning artificial neural network (PLAN).
-    from
+    from storage
 
     Args:
-
-        Input (list or ndarray): Input data for the model (single vector or single matrix).
-
-        W (list of ndarrays): Weights of the model.
-
-        scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+        Input (list or ndarray or cupyarray): Input data for the model (single vector or single matrix).
 
-
-
-
-
+        model_name (str): Name of the model.
+
+        model_path (str): Path of the model. Default: ''
+
+        dtype (cupy.dtype): Data type for the arrays. cp.float32 by default. Example: cp.float64, cp.float16. (optional)
     Returns:
-
+        cupyarray: Output from the model.
     """
-
-    from .data_operations_cuda import standard_scaler
-    from .activation_functions_cuda import apply_activation
-
-    Input = standard_scaler(None, Input, scaler_params)
 
     Input = cp.array(Input, dtype=dtype, copy=False)
-
-
+
+    from .activation_functions_cuda import apply_activation
+    from .data_operations_cuda import standard_scaler
+
     try:
+
+        model = load_model(model_name, model_path)
 
-
-
+        activation_potentiation = model[get_act_pot()]
+        scaler_params = model[get_scaler()]
+        W = model[get_weights()]
+        model_type = model[get_model_type()]
+
+        Input = standard_scaler(None, Input, scaler_params)
+
+        if model_type == 'MLP':
+
+            layer = Input
+            for i in range(len(W)):
+                if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                layer = layer @ W[i].T
+
+            return layer
+
+        else:
+
+            Input = apply_activation(Input, activation_potentiation)
+            result = Input @ W.T
+
+            return result
 
-        return neural_layer
-
     except:
-        print(Fore.RED + "ERROR: Unexpected
+        print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
         sys.exit()
 
+
 def reverse_predict_model_ram(output, W, dtype=cp.float32):
 
     """
@@ -407,4 +485,7 @@ def get_scaler():
 
 def get_preds_softmax():
 
-    return 5
+    return 5
+
+def get_model_type():
+    return 6
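The new getter keeps the module's convention of addressing the load_model return tuple by named index (get_preds_softmax() is 5, get_model_type() is 6). Taken together, an end-to-end sketch of predicting straight from storage with the new API might look like the following (model name, path, and input shape are placeholders; a previously saved model and CuPy are assumed):

    import cupy as cp
    from pyerualjetwork import model_operations_cuda as model_ops

    x = cp.random.rand(784)

    # predict_model_ssd resolves the model type internally, so the same call
    # serves both PLAN and MLP models loaded from disk.
    output = model_ops.predict_model_ssd(x, model_name='example_mlp', model_path='models/')
    print(output.shape)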