pyerualjetwork 4.5.2b0__py3-none-any.whl → 4.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
- __version__ = "4.5.2b0"
+ __version__ = "4.6"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
@@ -428,4 +428,33 @@ def batcher(x_test, y_test, batch_size=1):
          sampled_x.append(x_test[sampled_indices])
          sampled_y.append(y_test[sampled_indices])

-     return np.concatenate(sampled_x), np.concatenate(sampled_y)
+     return np.concatenate(sampled_x), np.concatenate(sampled_y)
+
+ def split_nested_arrays(data):
+
+     n_samples = len(data)
+     if n_samples == 0:
+         return []
+
+     n_components = len(data[0])
+
+     result = []
+
+     for i in range(n_components):
+         component_array = np.array([item[i] for item in data])
+         result.append(component_array)
+
+     return result
+
+
+ def reconstruct_nested_arrays(component_arrays):
+
+     n_components = len(component_arrays)
+     n_samples = len(component_arrays[0])
+
+     reconstructed_data = []
+     for i in range(n_samples):
+         sample = [component_arrays[j][i] for j in range(n_components)]
+         reconstructed_data.append(sample)
+
+     return reconstructed_data
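For orientation, here is a minimal, self-contained sketch of what the two new NumPy helpers do (standalone copies for illustration only; the real functions are the ones added in this hunk): split_nested_arrays groups per-sample component lists into one array per component, and reconstruct_nested_arrays reverses that grouping.

    import numpy as np

    def split_nested_arrays(data):
        # One array per component, built column-wise across samples.
        if len(data) == 0:
            return []
        return [np.array([item[i] for item in data]) for i in range(len(data[0]))]

    def reconstruct_nested_arrays(component_arrays):
        # Rebuild the original per-sample nesting from the component arrays.
        n_samples = len(component_arrays[0])
        return [[comp[i] for comp in component_arrays] for i in range(n_samples)]

    data = [[1, 10.0], [2, 20.0], [3, 30.0]]          # 3 samples, 2 components each
    components = split_nested_arrays(data)            # [array([1, 2, 3]), array([10., 20., 30.])]
    restored = reconstruct_nested_arrays(components)  # [[1, 10.0], [2, 20.0], [3, 30.0]] (as NumPy scalars)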
@@ -476,4 +476,34 @@ def batcher(x_test, y_test, batch_size=1):
  
          offset += num_samples

-     return sampled_x, sampled_y
+     return sampled_x, sampled_y
+
+
+ def split_nested_arrays(data):
+
+     n_samples = len(data)
+     if n_samples == 0:
+         return []
+
+     n_components = len(data[0])
+
+     result = []
+
+     for i in range(n_components):
+         component_array = cp.array([item[i] for item in data])
+         result.append(component_array)
+
+     return result
+
+
+ def reconstruct_nested_arrays(component_arrays):
+
+     n_components = len(component_arrays)
+     n_samples = len(component_arrays[0])
+
+     reconstructed_data = []
+     for i in range(n_samples):
+         sample = [component_arrays[j][i] for j in range(n_components)]
+         reconstructed_data.append(sample)
+
+     return reconstructed_data
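The CUDA module gets the same pair of helpers, with cp.array so the component arrays are built on the GPU. A short sketch of the same grouping and of moving the result back to the host (this assumes CuPy is installed and imported as cp, as elsewhere in the module):

    import cupy as cp  # requires a CUDA-capable environment

    data = [[1, 10.0], [2, 20.0], [3, 30.0]]
    components = [cp.array([item[i] for item in data]) for i in range(len(data[0]))]  # GPU arrays
    host_components = [cp.asnumpy(c) for c in components]                             # copy back to host when needed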
@@ -10,8 +10,8 @@ import pandas as pd
  
  def save_model(model_name,
                 W,
+                model_type,
                 scaler_params=None,
-                model_type='PLAN',
                 test_acc=None,
                 model_path='',
                 activation_potentiation=['linear'],
@@ -25,15 +25,25 @@ def save_model(model_name,
      Function to save a potentiation learning artificial neural network model.
      Args:
          model_name: (str): Name of the model.
+
          W: Weights of the model.
+
+         model_type: (str): Type of the model. Options: 'PLAN', 'MLP'.
+
          scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
-         model_type: (str): Type of the model. default: 'PLAN'
+
          test_acc: (float): Test accuracy of the model. default: None
+
          model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
-         activation_potentiation: (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
+
+         activation_potentiation: (list[str]): Activation function parameters for deeper PLAN networks, or per-layer activation functions for MLP models. For more information, run plan.activations_list(). default: ['linear']
+
          weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+
          weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-         show_architecture: (bool): It draws model architecture. True or False. Default: False
+
+         show_architecture: (bool): Draws the model architecture. True or False. Default: False. Note: architecture drawing currently works only for PLAN models, not MLP models (support is planned).
+
          show_info: (bool): Prints model details into console. default: True

      Returns:
@@ -42,7 +52,8 @@ def save_model(model_name,
  
      from .visualizations import draw_model_architecture

-     class_count = W.shape[0]
+     if model_type != 'PLAN' and model_type != 'MLP':
+         raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")

      if test_acc != None:
          test_acc= float(test_acc)
@@ -55,17 +66,45 @@ def save_model(model_name,
      print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + Style.RESET_ALL)
      sys.exit()

-     NeuronCount = 0
-     SynapseCount = 0
+     if model_type == 'PLAN':
+         class_count = W.shape[0]

+         NeuronCount = 0
+         SynapseCount = 0

-     try:
-         NeuronCount += np.shape(W)[0] + np.shape(W)[1]
-         SynapseCount += np.shape(W)[0] * np.shape(W)[1]
-     except:
+         try:
+             NeuronCount += np.shape(W)[0] + np.shape(W)[1]
+             SynapseCount += np.shape(W)[0] * np.shape(W)[1]
+         except:

-         print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
-         sys.exit()
+             print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+             sys.exit()
+
+     elif model_type == 'MLP':
+         class_count = W[-1].shape[0]
+
+         NeuronCount = []
+         SynapseCount = []
+
+         NeuronCount.append(np.shape(W[0])[1])
+
+         for i in range(len(W)):
+             try:
+                 NeuronCount.append(np.shape(W[i])[0])
+                 SynapseCount.append(np.shape(W[i])[0] * np.shape(W[i])[1])
+             except:
+
+                 print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+                 sys.exit()
+
+         SynapseCount.append(' ')
+
+         activation_potentiation.append('')
+         activation_potentiation.insert(0, '')
+
+     if isinstance(activation_potentiation, str):
+         activation_potentiation = [activation_potentiation]
+         activation_potentiation.append('')

      if scaler_params != None:
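As a rough worked example of the per-layer bookkeeping the MLP branch above performs (the layer sizes here are made up):

    import numpy as np

    # Hypothetical MLP weights for a 4 -> 8 -> 3 network, stored output-major (rows = outputs).
    W = [np.zeros((8, 4)), np.zeros((3, 8))]

    NeuronCount = [np.shape(W[0])[1]]                          # 4 input neurons
    SynapseCount = []
    for w in W:
        NeuronCount.append(np.shape(w)[0])                     # 8 hidden, then 3 output neurons
        SynapseCount.append(np.shape(w)[0] * np.shape(w)[1])   # 32, then 24 connections

    print(NeuronCount, SynapseCount)                           # [4, 8, 3] [32, 24]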
@@ -96,56 +135,63 @@ def save_model(model_name,
      df = pd.DataFrame(data)
      df.to_pickle(model_path + model_name + '.pkl')

+     if isinstance(W, list): max_file = len(W)
+     else: max_file = 1

-     try:
+     for i in range(max_file):

-         if weights_type == 'txt' and weights_format == 'f':
+         if max_file > 1: weight = W[i]
+         else: weight = W

-             np.savetxt(model_path + model_name + '_weights.txt', W, fmt='%f')
+         try:

-         if weights_type == 'txt' and weights_format == 'raw':
+             if weights_type == 'txt' and weights_format == 'f':

-             np.savetxt(model_path + model_name + '_weights.txt', W)
+                 np.savetxt(model_path + model_name + f'weights{i}.txt', weight, fmt='%f')

-         ###
+             if weights_type == 'txt' and weights_format == 'raw':

-
-         if weights_type == 'pkl' and weights_format == 'f':
+                 np.savetxt(model_path + model_name + f'weights{i}.txt', weight)

-             with open(model_path + model_name + '_weights.pkl', 'wb') as f:
-                 pickle.dump(W.astype(float), f)
+             ###

-         if weights_type == 'pkl' and weights_format =='raw':
-
-             with open(model_path + model_name + '_weights.pkl', 'wb') as f:
-                 pickle.dump(W, f)
+
+             if weights_type == 'pkl' and weights_format == 'f':

-         ###
+                 with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                     pickle.dump(weight.astype(float), f)

-         if weights_type == 'npy' and weights_format == 'f':
+             if weights_type == 'pkl' and weights_format =='raw':
+
+                 with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                     pickle.dump(weight, f)

-             np.save(model_path + model_name + '_weights.npy', W, W.astype(float))
+             ###

-         if weights_type == 'npy' and weights_format == 'raw':
+             if weights_type == 'npy' and weights_format == 'f':

-             np.save(model_path + model_name + '_weights.npy', W)
+                 np.save(model_path + model_name + f'weights{i}.npy', weight, weight.astype(float))

-         ###
+             if weights_type == 'npy' and weights_format == 'raw':

-         if weights_type == 'mat' and weights_format == 'f':
+                 np.save(model_path + model_name + f'weights{i}.npy', weight)

-             w = {'w': W.astype(float)}
-             io.savemat(model_path + model_name + '_weights.mat', w)
+             ###

-         if weights_type == 'mat' and weights_format == 'raw':
-
-             w = {'w': W}
-             io.savemat(model_path + model_name + '_weights.mat', w)
+             if weights_type == 'mat' and weights_format == 'f':

-     except:
+                 w = {'w': weight.astype(float)}
+                 io.savemat(model_path + model_name + f'weights{i}.mat', w)

-         print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + Style.RESET_ALL)
-         sys.exit()
+             if weights_type == 'mat' and weights_format == 'raw':
+
+                 w = {'w': weight}
+                 io.savemat(model_path + model_name + f'weights{i}.mat', w)
+
+         except:
+
+             print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + Style.RESET_ALL)
+             sys.exit()

      if show_info:
          print(df)
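The practical effect of the new loop is that each layer's weights now go to a separate file named model_name + f'weights{i}' (with no separator), alongside the existing model_name + '.pkl' metadata frame. A naming-only sketch, with the call shape implied by the new signature shown as a comment (the model name and shapes are hypothetical):

    # Example call shape (not executed here): save_model('demo_mlp', W, 'MLP', weights_type='npy')
    model_path, model_name = '', 'demo_mlp'
    W = [None, None]  # stand-in for the two weight matrices of a hypothetical MLP
    max_file = len(W) if isinstance(W, list) else 1
    for i in range(max_file):
        print(model_path + model_name + f'weights{i}.npy')
    # demo_mlpweights0.npy
    # demo_mlpweights1.npy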
@@ -170,13 +216,12 @@ def load_model(model_name,
      Function to load a potentiation learning model.

      Args:
-
          model_name (str): Name of the model.
-
+
          model_path (str): Path where the model is saved.

      Returns:
-         lists: W(list[num]), activation_potentiation, DataFrame of the model
+         lists: (list[df_elements]), DataFrame of the model
      """

      try:
@@ -204,26 +249,38 @@ def load_model(model_name,
  

      model_name = str(df['MODEL NAME'].iloc[0])
+     model_type = str(df['MODEL TYPE'].iloc[0])
      WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+     layers = list(df['NEURON COUNT'])

-     if WeightType == 'txt':
-         W = np.loadtxt(model_path + model_name + '_weights.txt')
-     elif WeightType == 'npy':
-         W = np.load(model_path + model_name + '_weights.npy')
-     elif WeightType == 'mat':
-         W = sio.loadmat(model_path + model_name + '_weights.mat')
-     elif WeightType == 'pkl':
-         with open(model_path + model_name + '_weights.pkl', 'rb') as f:
-             W = pickle.load(f)
-     else:
-
-         raise ValueError(
-             Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
-
-     if WeightType == 'mat':
-         W = W['w']
+     if model_type == 'MLP': max_file = len(layers)-1
+     else: max_file = 1
+
+     W = []
+
+     for i in range(max_file):
+
+         if WeightType == 'txt':
+             W.append(np.loadtxt(model_path + model_name + f'weights{i}.txt'))
+         elif WeightType == 'npy':
+             W.append(np.load(model_path + model_name + f'weights{i}.npy'))
+         elif WeightType == 'mat':
+             W.append(sio.loadmat(model_path + model_name + f'weights{i}.mat'))
+         elif WeightType == 'pkl':
+             with open(model_path + model_name + f'weights{i}.pkl', 'rb') as f:
+                 W = pickle.load(f)
+         else:
+
+             raise ValueError(
+                 Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
+
+         if WeightType == 'mat':
+             W[-1] = W[-1]['w']
+
+     if model_type == 'PLAN': W = np.array(W[0], dtype=W[0].dtype)
+
+     return W, None, None, activation_potentiation, scaler_params, None, model_type

-     return W, None, None, activation_potentiation, scaler_params


  def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
  def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
@@ -233,15 +290,13 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
233
290
  from storage
234
291
 
235
292
  Args:
236
-
237
293
  Input (list or ndarray): Input data for the model (single vector or single matrix).
238
-
294
+
239
295
  model_name (str): Name of the model.
240
-
296
+
241
297
  model_path (str): Path of the model. Default: ''
242
-
243
- dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
244
-
298
+
299
+ dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64, np.float16. (optional)
245
300
  Returns:
246
301
  ndarray: Output from the model.
247
302
  """
@@ -249,25 +304,37 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
      from .activation_functions import apply_activation
      from .data_operations import standard_scaler

-     model = load_model(model_name, model_path)
-
-     activation_potentiation = model[get_act_pot()]
-     scaler_params = model[get_scaler()]
-     W = model[get_weights()]
+     try:

-     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
+         model = load_model(model_name, model_path)
+
+         activation_potentiation = model[get_act_pot()]
+         scaler_params = model[get_scaler()]
+         W = model[get_weights()]
+         model_type = model[get_model_type()]

-     Input = np.array(Input, dtype=dtype, copy=False)
-     Input = Input.ravel()
+         Input = standard_scaler(None, Input, scaler_params)
+
+         if model_type == 'MLP':
+
+             layer = Input
+             for i in range(len(W)):
+                 if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                 layer = layer @ W[i].T
+
+             return layer
+
+         else:
+
+             Input = apply_activation(Input, activation_potentiation)
+             result = Input @ W.T
+
+             return result

-     try:
-         Input = apply_activation(Input, activation_potentiation)
-         neural_layer = Input @ W.T
-         return neural_layer
      except:
          print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
          sys.exit()
-
+

  def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float32):
@@ -300,43 +367,49 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float32):
  


- def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], dtype=np.float32):
+ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], is_mlp=False):

      """
      Function to make a prediction using a potentiation learning artificial neural network (PLAN).
      from memory.

      Args:
-
          Input (list or ndarray): Input data for the model (single vector or single matrix).
-
+
          W (list of ndarrays): Weights of the model.
-
+
          scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.

-         activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
-
+         activation_potentiation (list[str]): Activation list for deep PLAN, or per-layer activation list for MLP models. Default: ['linear']
+
+         is_mlp (bool, optional): Set True to predict with an MLP model, False for a PLAN model. Default: False (PLAN)
      Returns:
-         ndarray: Output from the model.
+         ndarray: Output from the model.
      """

      from .data_operations import standard_scaler
      from .activation_functions import apply_activation

-     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
-
-     Input = np.array(Input, dtype=dtype, copy=False)
-     Input = Input.ravel()
-
      try:
+
+         Input = standard_scaler(None, Input, scaler_params)

-         Input = apply_activation(Input, activation_potentiation)
-         neural_layer = Input @ W.T
+         if is_mlp:
+
+             layer = Input
+             for i in range(len(W)):
+                 if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                 layer = layer @ W[i].T
+
+             return layer

-         return neural_layer
-
+         else:
+
+             Input = apply_activation(Input, activation_potentiation)
+             result = Input @ W.T
+
+             return result
+

      except:
          print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + Style.RESET_ALL)
          sys.exit()
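In-memory prediction keeps a single entry point: pass one weight matrix for a PLAN model, or a list of matrices with is_mlp=True for an MLP. A call sketch (the import path, activation names, and shapes are assumptions; the calls are commented out because they need the installed package):

    import numpy as np
    # from pyerualjetwork.model_operations import predict_model_ram   # import path is an assumption

    W_plan = np.random.rand(3, 4)                          # single PLAN weight matrix
    W_mlp = [np.random.rand(8, 4), np.random.rand(3, 8)]   # list of MLP weight matrices
    x = np.random.rand(4)
    # out_plan = predict_model_ram(x, W_plan)                                                        # PLAN path (is_mlp defaults to False)
    # out_mlp = predict_model_ram(x, W_mlp, activation_potentiation=['tanh', 'linear'], is_mlp=True) # MLP path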
@@ -393,4 +466,7 @@ def get_scaler():
  
  def get_preds_softmax():

-     return 5
+     return 5
+
+ def get_model_type():
+     return 6