pyerualjetwork 4.5.3__py3-none-any.whl → 4.6b0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -1,4 +1,4 @@
- __version__ = "4.5.3"
+ __version__ = "4.6b0"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
@@ -428,4 +428,33 @@ def batcher(x_test, y_test, batch_size=1):
          sampled_x.append(x_test[sampled_indices])
          sampled_y.append(y_test[sampled_indices])
 
-     return np.concatenate(sampled_x), np.concatenate(sampled_y)
+     return np.concatenate(sampled_x), np.concatenate(sampled_y)
+
+ def split_nested_arrays(data):
+
+     n_samples = len(data)
+     if n_samples == 0:
+         return []
+
+     n_components = len(data[0])
+
+     result = []
+
+     for i in range(n_components):
+         component_array = np.array([item[i] for item in data])
+         result.append(component_array)
+
+     return result
+
+
+ def reconstruct_nested_arrays(component_arrays):
+
+     n_components = len(component_arrays)
+     n_samples = len(component_arrays[0])
+
+     reconstructed_data = []
+     for i in range(n_samples):
+         sample = [component_arrays[j][i] for j in range(n_components)]
+         reconstructed_data.append(sample)
+
+     return reconstructed_data
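Note: the two helpers added above turn a list of per-sample [x, y, ...] groups into one stacked array per component, and back again. A minimal round-trip sketch, assuming they are exposed from the package's NumPy data_operations module (the import path and sample data below are illustrative, not taken from the diff):

import numpy as np
from pyerualjetwork.data_operations import split_nested_arrays, reconstruct_nested_arrays  # assumed import path

# Two samples, each holding an input vector and a label vector.
pairs = [[np.array([1.0, 2.0]), np.array([0])],
         [np.array([3.0, 4.0]), np.array([1])]]

xs, ys = split_nested_arrays(pairs)             # xs.shape == (2, 2), ys.shape == (2, 1)
restored = reconstruct_nested_arrays([xs, ys])  # back to a list of [x, y] samples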
@@ -476,4 +476,34 @@ def batcher(x_test, y_test, batch_size=1):
 
          offset += num_samples
 
-     return sampled_x, sampled_y
+     return sampled_x, sampled_y
+
+
+ def split_nested_arrays(data):
+
+     n_samples = len(data)
+     if n_samples == 0:
+         return []
+
+     n_components = len(data[0])
+
+     result = []
+
+     for i in range(n_components):
+         component_array = cp.array([item[i] for item in data])
+         result.append(component_array)
+
+     return result
+
+
+ def reconstruct_nested_arrays(component_arrays):
+
+     n_components = len(component_arrays)
+     n_samples = len(component_arrays[0])
+
+     reconstructed_data = []
+     for i in range(n_samples):
+         sample = [component_arrays[j][i] for j in range(n_components)]
+         reconstructed_data.append(sample)
+
+     return reconstructed_data
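Note: this hunk adds the same pair of helpers to the GPU code path, stacking components with cp.array instead of np.array. A short sketch, assuming the CUDA counterpart module name below (illustrative); cp.asnumpy is used only to copy a result back to host memory:

import cupy as cp
from pyerualjetwork.data_operations_cuda import split_nested_arrays  # assumed import path

pairs = [[cp.array([1.0, 2.0]), cp.array([0])],
         [cp.array([3.0, 4.0]), cp.array([1])]]

xs, ys = split_nested_arrays(pairs)  # stacked CuPy arrays living on the GPU
xs_host = cp.asnumpy(xs)             # bring one component back to NumPy if needed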
@@ -10,8 +10,8 @@ import pandas as pd
 
  def save_model(model_name,
                 W,
+                model_type,
                 scaler_params=None,
-                model_type='PLAN',
                 test_acc=None,
                 model_path='',
                 activation_potentiation=['linear'],
@@ -25,15 +25,25 @@ def save_model(model_name,
      Function to save a potentiation learning artificial neural network model.
      Args:
          model_name: (str): Name of the model.
+
          W: Weights of the model.
+
+         model_type: (str): Type of the model. Options: 'PLAN', 'MLP'.
+
          scaler_params: (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
-         model_type: (str): Type of the model. default: 'PLAN'
+
          test_acc: (float): Test accuracy of the model. default: None
+
          model_path: (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/ default: ''
+
          activation_potentiation: (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
+
          weights_type: (str): Type of weights to save (options: 'txt', 'pkl', 'npy', 'mat'). default: 'npy'
+
          weights_format: (str): Format of the weights (options: 'f', 'raw'). default: 'raw'
-         show_architecture: (bool): It draws model architecture. True or False. Default: False
+
+         show_architecture: (bool): It draws model architecture. True or False. Default: False. NOTE! draw architecture only works for PLAN models. Not MLP models for now, but it will be.
+
          show_info: (bool): Prints model details into console. default: True
 
      Returns:
@@ -42,7 +52,8 @@ def save_model(model_name,
 
      from .visualizations import draw_model_architecture
 
-     class_count = W.shape[0]
+     if model_type != 'PLAN' and model_type != 'MLP':
+         raise ValueError("model_type parameter must be 'PLAN' or 'MLP'.")
 
      if test_acc != None:
          test_acc= float(test_acc)
@@ -55,17 +66,41 @@ def save_model(model_name,
          print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + Style.RESET_ALL)
          sys.exit()
 
-     NeuronCount = 0
-     SynapseCount = 0
+     if model_type == 'PLAN':
+         class_count = W.shape[0]
 
+         NeuronCount = 0
+         SynapseCount = 0
 
-     try:
-         NeuronCount += np.shape(W)[0] + np.shape(W)[1]
-         SynapseCount += np.shape(W)[0] * np.shape(W)[1]
-     except:
+         try:
+             NeuronCount += np.shape(W)[0] + np.shape(W)[1]
+             SynapseCount += np.shape(W)[0] * np.shape(W)[1]
+         except:
 
-         print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
-         sys.exit()
+             print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+             sys.exit()
+
+     elif model_type == 'MLP':
+         class_count = W[-1].shape[0]
+
+         NeuronCount = []
+         SynapseCount = []
+
+         NeuronCount.append(np.shape(W[0])[1])
+
+         for i in range(len(W)):
+             try:
+                 NeuronCount.append(np.shape(W[i])[0])
+                 SynapseCount.append(np.shape(W[i])[0] * np.shape(W[i])[1])
+             except:
+
+                 print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + Style.RESET_ALL)
+                 sys.exit()
+
+         SynapseCount.append(' ')
+
+         activation_potentiation.append('')
+         activation_potentiation.insert(0, '')
 
      if scaler_params != None:
 
@@ -96,56 +131,63 @@ def save_model(model_name,
      df = pd.DataFrame(data)
      df.to_pickle(model_path + model_name + '.pkl')
 
+     if isinstance(W, list): max_file = len(W)
+     else: max_file = 1
 
-     try:
+     for i in range(max_file):
 
-         if weights_type == 'txt' and weights_format == 'f':
+         if max_file > 1: weight = W[i]
+         else: weight = W
 
-             np.savetxt(model_path + model_name + '_weights.txt', W, fmt='%f')
+         try:
 
-         if weights_type == 'txt' and weights_format == 'raw':
+             if weights_type == 'txt' and weights_format == 'f':
 
-             np.savetxt(model_path + model_name + '_weights.txt', W)
+                 np.savetxt(model_path + model_name + f'weights{i}.txt', weight, fmt='%f')
 
-         ###
+             if weights_type == 'txt' and weights_format == 'raw':
 
-
-         if weights_type == 'pkl' and weights_format == 'f':
+                 np.savetxt(model_path + model_name + f'weights{i}.txt', weight)
 
-             with open(model_path + model_name + '_weights.pkl', 'wb') as f:
-                 pickle.dump(W.astype(float), f)
+             ###
 
-         if weights_type == 'pkl' and weights_format =='raw':
-
-             with open(model_path + model_name + '_weights.pkl', 'wb') as f:
-                 pickle.dump(W, f)
+
+             if weights_type == 'pkl' and weights_format == 'f':
 
-         ###
+                 with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                     pickle.dump(weight.astype(float), f)
 
-         if weights_type == 'npy' and weights_format == 'f':
+             if weights_type == 'pkl' and weights_format =='raw':
+
+                 with open(model_path + model_name + f'weights{i}.pkl', 'wb') as f:
+                     pickle.dump(weight, f)
 
-             np.save(model_path + model_name + '_weights.npy', W, W.astype(float))
+             ###
 
-         if weights_type == 'npy' and weights_format == 'raw':
+             if weights_type == 'npy' and weights_format == 'f':
 
-             np.save(model_path + model_name + '_weights.npy', W)
+                 np.save(model_path + model_name + f'weights{i}.npy', weight, weight.astype(float))
 
-         ###
+             if weights_type == 'npy' and weights_format == 'raw':
 
-         if weights_type == 'mat' and weights_format == 'f':
+                 np.save(model_path + model_name + f'weights{i}.npy', weight)
 
-             w = {'w': W.astype(float)}
-             io.savemat(model_path + model_name + '_weights.mat', w)
+             ###
 
-         if weights_type == 'mat' and weights_format == 'raw':
-
-             w = {'w': W}
-             io.savemat(model_path + model_name + '_weights.mat', w)
+             if weights_type == 'mat' and weights_format == 'f':
 
-     except:
+                 w = {'w': weight.astype(float)}
+                 io.savemat(model_path + model_name + f'weights{i}.mat', w)
 
-         print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + Style.RESET_ALL)
-         sys.exit()
+             if weights_type == 'mat' and weights_format == 'raw':
+
+                 w = {'w': weight}
+                 io.savemat(model_path + model_name + f'weights{i}.mat', w)
+
+         except:
+
+             print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + Style.RESET_ALL)
+             sys.exit()
 
      if show_info:
          print(df)
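Note: with these changes save_model takes model_type as a required argument and, when W is a list of layer matrices, writes one weight file per layer using the f'weights{i}' naming shown above, alongside the model DataFrame saved as <model_name>.pkl. A hedged usage sketch; the module path and shapes are illustrative:

import numpy as np
from pyerualjetwork.model_operations import save_model  # assumed import path

# A two-layer MLP: 8 inputs -> 16 hidden units -> 3 classes.
W = [np.random.rand(16, 8), np.random.rand(3, 16)]

save_model(model_name='demo_mlp',
           W=W,
           model_type='MLP',      # now required; must be 'PLAN' or 'MLP'
           model_path='',         # current working directory
           weights_type='npy',
           weights_format='raw')
# Per the loop above, this writes demo_mlp.pkl plus one file per layer:
# demo_mlpweights0.npy and demo_mlpweights1.npy.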
@@ -170,13 +212,12 @@ def load_model(model_name,
      Function to load a potentiation learning model.
 
      Args:
-
          model_name (str): Name of the model.
-
+
          model_path (str): Path where the model is saved.
 
      Returns:
-         lists: W(list[num]), activation_potentiation, DataFrame of the model
+         lists: (list[df_elements]), DataFrame of the model
      """
 
      try:
          try:
@@ -204,26 +245,36 @@ def load_model(model_name,
 
 
      model_name = str(df['MODEL NAME'].iloc[0])
+     model_type = str(df['MODEL TYPE'].iloc[0])
      WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+     layers = list(df['NEURON COUNT'])
 
-     if WeightType == 'txt':
-         W = np.loadtxt(model_path + model_name + '_weights.txt')
-     elif WeightType == 'npy':
-         W = np.load(model_path + model_name + '_weights.npy')
-     elif WeightType == 'mat':
-         W = sio.loadmat(model_path + model_name + '_weights.mat')
-     elif WeightType == 'pkl':
-         with open(model_path + model_name + '_weights.pkl', 'rb') as f:
-             W = pickle.load(f)
-     else:
-
-         raise ValueError(
-             Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
-
-     if WeightType == 'mat':
-         W = W['w']
+     if model_type == 'MLP': max_file = len(layers)-1
+     else: max_file = 1
+
+     W = []
+
+     for i in range(max_file):
+
+         if WeightType == 'txt':
+             W.append(np.loadtxt(model_path + model_name + f'weights{i}.txt'))
+         elif WeightType == 'npy':
+             W.append(np.load(model_path + model_name + f'weights{i}.npy'))
+         elif WeightType == 'mat':
+             W.append(sio.loadmat(model_path + model_name + f'weights{i}.mat'))
+         elif WeightType == 'pkl':
+             with open(model_path + model_name + f'weights{i}.pkl', 'rb') as f:
+                 W = pickle.load(f)
+         else:
+
+             raise ValueError(
+                 Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy', 'pkl' or 'mat' from: load_model." + Style.RESET_ALL)
+
+         if WeightType == 'mat':
+             W[-1] = W[-1]['w']
+
+     return W, None, None, activation_potentiation, scaler_params, None, model_type
 
-     return W, None, None, activation_potentiation, scaler_params
 
 
  def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
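Note: load_model now also reads the MODEL TYPE column, loads one weight file per layer for MLP models (len(layers)-1 files, derived from the NEURON COUNT column), and returns a 7-element result with model_type in the last position. A minimal unpacking sketch (assumed import path; file names follow the save_model example above):

from pyerualjetwork.model_operations import load_model  # assumed import path

W, _, _, activation_potentiation, scaler_params, _, model_type = load_model('demo_mlp', model_path='')

if model_type == 'MLP':
    print(len(W), 'weight matrices loaded')  # one per layer, e.g. demo_mlpweights0.npy, demo_mlpweights1.npy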
@@ -233,15 +284,13 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
      from storage
 
      Args:
-
          Input (list or ndarray): Input data for the model (single vector or single matrix).
-
+
          model_name (str): Name of the model.
-
+
          model_path (str): Path of the model. Default: ''
-
-         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
-
+
+         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64, np.float16. (optional)
      Returns:
          ndarray: Output from the model.
      """
@@ -249,25 +298,37 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
      from .activation_functions import apply_activation
      from .data_operations import standard_scaler
 
-     model = load_model(model_name, model_path)
-
-     activation_potentiation = model[get_act_pot()]
-     scaler_params = model[get_scaler()]
-     W = model[get_weights()]
+     try:
 
-     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
+         model = load_model(model_name, model_path)
+
+         activation_potentiation = model[get_act_pot()]
+         scaler_params = model[get_scaler()]
+         W = model[get_weights()]
+         model_type = model[get_model_type()]
 
-     Input = np.array(Input, dtype=dtype, copy=False)
-     Input = Input.ravel()
+         Input = standard_scaler(None, Input, scaler_params)
+
+         if model_type == 'MLP':
+
+             layer = Input
+             for i in range(len(W)):
+                 if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                 layer = layer @ W[i].T
+
+             return layer
+
+         else:
+
+             Input = apply_activation(Input, activation_potentiation)
+             result = Input @ W.T
+
+             return result
 
-     try:
-         Input = apply_activation(Input, activation_potentiation)
-         neural_layer = Input @ W.T
-         return neural_layer
      except:
          print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
          sys.exit()
-
+
 
  def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float32):
 
@@ -300,43 +361,49 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float3
 
 
 
- def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], dtype=np.float32):
+ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], is_mlp=False):
 
      """
      Function to make a prediction using a potentiation learning artificial neural network (PLAN).
      from memory.
 
      Args:
-
          Input (list or ndarray): Input data for the model (single vector or single matrix).
-
+
          W (list of ndarrays): Weights of the model.
-
+
          scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
 
          activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
-
+
+         is_mlp (bool, optional): Predict from PLAN model or MLP model ? Default: False (PLAN)
      Returns:
-         ndarray: Output from the model.
+         ndarray: Output from the model.
      """
 
-     from .data_operations import standard_scaler
-     from .activation_functions import apply_activation
-
-     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
-
-     Input = np.array(Input, dtype=dtype, copy=False)
-     Input = Input.ravel()
+     from data_operations import standard_scaler
+     from activation_functions import apply_activation
 
      try:
+
+         Input = standard_scaler(None, Input, scaler_params)
 
-         Input = apply_activation(Input, activation_potentiation)
-         neural_layer = Input @ W.T
+         if is_mlp:
+
+             layer = Input
+             for i in range(len(W)):
+                 if i != len(W) - 1: layer = apply_activation(layer, activation_potentiation[i])
+                 layer = layer @ W[i].T
+
+             return layer
 
-         return neural_layer
-
+         else:
+
+             Input = apply_activation(Input, activation_potentiation)
+             result = Input @ W.T
+
+             return result
+
      except:
          print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + Style.RESET_ALL)
          sys.exit()
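Note: predict_model_ram now accepts is_mlp, which switches from the single PLAN matmul to the layered forward pass above (an activation is applied before every weight matmul except the final one); predict_model_ssd applies the same logic but loads the model from disk by name. A hedged sketch with in-memory weights (import path, weights, and activation list are illustrative):

import numpy as np
from pyerualjetwork.model_operations import predict_model_ram  # assumed import path

W = [np.random.rand(16, 8), np.random.rand(3, 16)]  # two-layer MLP weights held in memory
sample = np.random.rand(8)                          # a single input vector

output = predict_model_ram(sample, W,
                           activation_potentiation=['linear'],  # indexed per non-final layer by the loop above
                           is_mlp=True)
predicted_class = np.argmax(output)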
@@ -393,4 +460,7 @@ def get_scaler():
 
  def get_preds_softmax():
 
-     return 5
+     return 5
+
+ def get_model_type():
+     return 6
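Note: get_model_type() mirrors the other index getters; it returns 6, the position of model_type in the result of load_model, which is exactly how predict_model_ssd looks it up above. A tiny sketch (assumed import path):

from pyerualjetwork.model_operations import load_model, get_weights, get_model_type  # assumed import path

model = load_model('demo_mlp', model_path='')
W = model[get_weights()]              # weight matrices
model_type = model[get_model_type()]  # 'PLAN' or 'MLP'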