pyerualjetwork 2.3.8__tar.gz → 2.6.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork-2.6.3/PKG-INFO +7 -0
- pyerualjetwork-2.6.3/plan/__init__.py +5 -0
- pyerualjetwork-2.3.8/plan_di/plan_di.py → pyerualjetwork-2.6.3/plan/plan.py +292 -105
- pyerualjetwork-2.6.3/pyerualjetwork.egg-info/PKG-INFO +7 -0
- {pyerualjetwork-2.3.8 → pyerualjetwork-2.6.3}/pyerualjetwork.egg-info/SOURCES.txt +2 -4
- pyerualjetwork-2.6.3/pyerualjetwork.egg-info/top_level.txt +1 -0
- {pyerualjetwork-2.3.8 → pyerualjetwork-2.6.3}/setup.py +2 -2
- pyerualjetwork-2.3.8/PKG-INFO +0 -7
- pyerualjetwork-2.3.8/plan_bi/__init__.py +0 -5
- pyerualjetwork-2.3.8/plan_bi/plan_bi.py +0 -1506
- pyerualjetwork-2.3.8/plan_di/__init__.py +0 -5
- pyerualjetwork-2.3.8/pyerualjetwork.egg-info/PKG-INFO +0 -7
- pyerualjetwork-2.3.8/pyerualjetwork.egg-info/top_level.txt +0 -2
- {pyerualjetwork-2.3.8 → pyerualjetwork-2.6.3}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.3.8 → pyerualjetwork-2.6.3}/setup.cfg +0 -0
pyerualjetwork-2.6.3/PKG-INFO
@@ -0,0 +1,7 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.6.3
+Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.6.3/plan/__init__.py
@@ -0,0 +1,5 @@
+# pyerualjetwork/PLAN/__init__.py
+
+# This file is the main entry point of the plan module.
+
+from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization
pyerualjetwork-2.3.8/plan_di/plan_di.py → pyerualjetwork-2.6.3/plan/plan.py
@@ -1,8 +1,9 @@
 
+# -*- coding: utf-8 -*-
 """
-Created on
+Created on Tue Jun 18 23:32:16 2024
 
-@author: hasan
+@author: hasan
 """
 
 import pandas as pd
@@ -20,9 +21,14 @@ import seaborn as sns
 
 def fit(
     x_train: List[Union[int, float]],
-    # At least two.. and one hot encoded
-
-
+    y_train: List[Union[int, float]], # At least two.. and one hot encoded
+    show_training,
+    show_count= None,
+    val= None,
+    val_count= None,
+    x_val= None,
+    y_val= None,
+    activation_potentiation=None # (float): Input activation_potentiation (optional)
 ) -> str:
 
     infoPLAN = """
@@ -30,19 +36,49 @@ def fit(
 
     Args:
         x_train (list[num]): List of input data.
-        y_train (list[num]): List of
-        show_training (bool, str): True, None or
-
+        y_train (list[num]): List of target labels. (one hot encoded)
+        show_training (bool, str): True, None or'final'
+        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
+        val (None or True): validation in training process ? None or True Default: None
+        val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
+        x_val (list[num]): List of validation data. (optional) Default: x_train
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
+        activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Train_acc).
         error handled ?: Process status ('e')
    """
+
 
    if len(x_train) != len(y_train):
+
        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
        return 'e'
+
+   if x_val == None and y_val == None:
+
+       x_val = x_train
+       y_val = y_train
+
+   if val == True and val_count == None:
+
+       val_count = 0.1
+
+   if val == True and val_count != None:
+
+       val_count = int(len(x_train) * val_count)
+       val_count_copy = val_count
 
+   if show_count == None:
+
+       show_count = 10
+
+   if show_training == True or show_training == 'final':
+
+       row, col = shape_control(x_train)
+
    class_count = set()
+
    for sublist in y_train:
 
        class_count.add(tuple(sublist))
@@ -53,20 +89,24 @@ def fit(
 
    neurons = [len(class_count), len(class_count)]
    layers = ['fex']
+   val_list = [None]
 
    x_train[0] = np.array(x_train[0])
    x_train[0] = x_train[0].ravel()
    x_train_size = len(x_train[0])
 
-   W =
+   W = weight_identification(
        len(layers) - 1, len(class_count), neurons, x_train_size)
 
    trained_W = [1] * len(W)
    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
    start_time = time.time()
-   y =
+   y = decode_one_hot(y_train)
 
    for index, inp in enumerate(x_train):
+
+       progress = index / len(x_train) * 100
+
        uni_start_time = time.time()
        inp = np.array(inp)
        inp = inp.ravel()
@@ -80,40 +120,104 @@ def fit(
 
        for Lindex, Layer in enumerate(layers):
 
-           neural_layer =
+           neural_layer = normalization(neural_layer)
 
            if Layer == 'fex':
-               W[Lindex] =
+               W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potentiation)
 
        for i, w in enumerate(W):
            trained_W[i] = trained_W[i] + w
 
+
+       if val == True:
+
+           if index == val_count:
+
                if show_training == True:
-
-                   fig, ax = plt.subplots(1, 10, figsize=(18, 14))
-
                    try:
-
-
+                       plt.close(fig)
+
                    except:
-
-
-
-
-
-
-
-
-                   ax[j].set_aspect('equal')
-
-                   ax[j].set_xticks([])
-                   ax[j].set_yticks([])
-                   ax[j].set_title(f'{j+1}. Neuron')
-
-
-                   plt.show()
+                       pass
+
+               val_count += val_count_copy
+
+               layers.append('cat')
+               trained_W.append(np.eye(len(class_count)))
+
+               validation_model = evaluate(x_val, y_val, None, trained_W)
 
-
+               layers.pop()
+               trained_W.pop()
+
+               val_acc = validation_model[get_acc()]
+
+               val_list.append(val_acc)
+
+
+               plt.plot(val_list, linestyle='-',
+                        color='r')
+
+               progress_status = f"{progress:.1f}"
+               plt.title('Validation accuracy graph. Amount of data learned by the model: % ' + progress_status)
+               plt.xlabel('Learning Progress')
+               plt.ylabel('Accuracy')
+               plt.ylim(0, 1)
+               plt.draw()
+               plt.pause(0.1)
+
+       if show_training == True:
+
+           if index %show_count == 0:
+
+
+               if index != 0:
+                   plt.close(fig)
+
+               if row != 0:
+
+                   fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+
+               else:
+
+                   fig, ax = plt.subplots(1, 1, figsize=(18, 14))
+
+               for j in range(len(class_count)):
+
+
+                   if row != 0:
+
+                       mat = trained_W[0][j,:].reshape(row, col)
+                       suptitle_info = 'Neurons Learning Progress: % '
+                       title_info = f'{j+1}. Neuron'
+
+                       mat = trained_W[0][j,:].reshape(row, col)
+
+                       ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+                       ax[j].set_aspect('equal')
+
+                       ax[j].set_xticks([])
+                       ax[j].set_yticks([])
+                       ax[j].set_title(title_info)
+
+                   else:
+
+                       mat = trained_W[0]
+                       ax.imshow(mat, interpolation='sinc', cmap='viridis')
+                       suptitle_info = 'Weight Learning Progress: % '
+                       title_info = 'Weight Matrix Of Fex Layer'
+
+
+
+
+               progress_status = f"{progress:.1f}"
+               fig.suptitle(suptitle_info + progress_status)
+               plt.draw()
+               plt.pause(0.1)
+
+
+       W = weight_identification(
            len(layers) - 1, len(class_count), neurons, x_train_size)
 
        uni_end_time = time.time()
@@ -133,29 +237,25 @@ def fit(
        print('\rTraining: ', index, "/", len(x_train), "\n", end="")
 
    if show_training == 'final':
-
-       fig, ax = plt.subplots(1, 10, figsize=(18, 14))
-
-       try:
-           row = x_train[1].shape[0]
-           col = x_train[1].shape[1]
-       except:
-           print(Fore.RED + 'ERROR: You try train showing but inputs is raveled. x_train inputs to must be reshape for training_show.', infoPLAN + Style.RESET_ALL)
-           return 'e'
 
-
-
-           mat = trained_W[0][j,:].reshape(row, col)
+       fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
 
-
-
-
-
-
-
-
+       for j in range(len(class_count)):
+
+           mat = trained_W[0][j,:].reshape(row, col)
+
+           ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+           ax[j].set_aspect('equal')
+
+           ax[j].set_xticks([])
+           ax[j].set_yticks([])
+           ax[j].set_title(f'{j+1}. Neuron')
+
+           progress_status = f"{progress:.1f}"
+           fig.suptitle('Neurons Learning Progress: % ' + progress_status)
+           plt.draw()
+           plt.pause(0.1)
 
-       plt.show()
 
    EndTime = time.time()
 
@@ -180,6 +280,39 @@ def fit(
 
 # FUNCTIONS -----
 
+def shape_control(x_train):
+
+    try:
+        row = x_train[1].shape[0]
+        col = x_train[1].shape[1]
+
+    except:
+
+        print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
+
+        try:
+            row, col = find_numbers(len(x_train[0]))
+
+        except:
+
+            print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
+            return [0, 0]
+
+    return row, col
+
+def find_numbers(n):
+    if n <= 1:
+        raise ValueError("Parameter 'n' must be greater than 1.")
+
+    for i in range(2, int(n**0.5) + 1):
+        if n % i == 0:
+            factor1 = i
+            factor2 = n // i
+            if factor1 == factor2:
+                return factor1, factor2
+
+    return None
+
 def weight_normalization(
     W,
     class_count
@@ -234,32 +367,53 @@ def fex(
     Input, # list[num]: Input data.
     w, # num: Weight matrix of the neural network.
     is_training, # bool: Flag indicating if the function is called during training (True or False).
-    Class # int: Which class is, if training.
+    Class, # int: Which class is, if training.
+    activation_potentiation # float or None: Input activation potentiation (optional)
 ) -> tuple:
     """
-    Applies feature extraction process to the input data using synaptic
+    Applies feature extraction process to the input data using synaptic potentiation.
 
     Args:
         Input (num): Input data.
         w (num): Weight matrix of the neural network.
         is_training (bool): Flag indicating if the function is called during training (True or False).
         Class (int): if is during training then which class(label) ? is isnt then put None.
+        activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
         tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
 
-    if is_training == True:
-
+    if is_training == True and activation_potentiation == None:
+
        w[Class, :] = Input
 
        return w
 
-
-
-
-
-
+    elif is_training == True and activation_potentiation != None:
+
+
+        Input[Input < activation_potentiation] = 0
+        Input[Input > activation_potentiation] = 1
+
+        w[Class,:] = Input
+
+        return w
+
+    elif is_training == False and activation_potentiation == None:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
+
+    elif is_training == False and activation_potentiation != None:
+
+        Input[Input < activation_potentiation] = 0
+        Input[Input > activation_potentiation] = 1
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
 
 
 
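
With a non-None `activation_potentiation`, `fex` now hard-binarizes the input before writing it into the class row (training) or taking the dot product (inference). The sketch below isolates that thresholding step; note that values exactly equal to the threshold pass through unchanged, since both masked assignments use strict inequalities. Illustrative code, not from the package:

import numpy as np

def binarize(inp, threshold):
    # Mirrors the two masked assignments in fex(); fex() mutates Input in place,
    # the copy here is only for the demonstration.
    out = np.array(inp, dtype=float)
    out[out < threshold] = 0
    out[out > threshold] = 1
    return out

print(binarize([-0.5, 0.1, 0.3, 0.9], 0.3))   # [0.  0.  0.3 1. ]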
@@ -336,7 +490,8 @@ def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): (True or False)
-    W
+    W, # list[num]: Weight matrix list of the neural network.
+    activation_potentiation=None # activation_potentiation (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -346,6 +501,7 @@ def evaluate(
        y_test (list[num]): Test labels.
        show_metrices (bool): (True or False)
        W (list[num]): Weight matrix list of the neural network.
+       activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
        tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -376,7 +532,7 @@ def evaluate(
            neural_layer = normalization(neural_layer)
 
            if Layer == 'fex':
-               neural_layer = fex(neural_layer, W[index], False, None)
+               neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
            elif Layer == 'cat':
                neural_layer = np.dot(W[index], neural_layer)
 
@@ -451,7 +607,8 @@ def multiple_evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
-    MW # list[list[num]]: Weight matrix of the neural network.
+    MW, # list[list[num]]: Weight matrix of the neural network.
+    activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -498,7 +655,7 @@ def multiple_evaluate(
            neural_layer = normalization(neural_layer)
 
            if Layer == 'fex':
-               neural_layer = fex(neural_layer, W[index], False, None)
+               neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
            elif Layer == 'cat':
                neural_layer = np.dot(W[index], neural_layer)
 
@@ -568,7 +725,7 @@ def multiple_evaluate(
 
    except:
 
-       print(Fore.RED + "ERROR: Testing model parameters like '
+       print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
        return 'e'
 
    return W, y_preds, acc
@@ -582,11 +739,12 @@ def save_model(model_name,
                weights_format,
                model_path,
                scaler_params,
-               W
+               W,
+               activation_potentiation=None
                ):
 
    infosave_model = """
-   Function to save a
+   Function to save a potentiation learning model.
 
    Arguments:
    model_name (str): Name of the model.
@@ -598,6 +756,7 @@ def save_model(model_name,
    model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
    W: Weights of the model.
+   activation_potentiation (float or None): Threshold value for comparison. (optional)
 
    Returns:
    str: Message indicating if the model was saved successfully or encountered an error.
@@ -646,7 +805,8 @@ def save_model(model_name,
            'WEIGHTS TYPE': weights_type,
            'WEIGHTS FORMAT': weights_format,
            'MODEL PATH': model_path,
-           'STANDARD SCALER': scaler_params
+           'STANDARD SCALER': scaler_params,
+           'ACTIVATION POTENTIATION': activation_potentiation
            }
    try:
 
@@ -735,14 +895,14 @@ def load_model(model_name,
                model_path,
                ):
    infoload_model = """
-   Function to load a
+   Function to load a potentiation learning model.
 
    Arguments:
    model_name (str): Name of the model.
    model_path (str): Path where the model is saved.
 
    Returns:
-   lists: W(list[num]),
+   lists: W(list[num]), activation_potentiation, DataFrame of the model
    """
    pass
 
@@ -782,22 +942,39 @@
 def predict_model_ssd(Input, model_name, model_path):
 
    infopredict_model_ssd = """
-   Function to make a prediction using a divided
+   Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
 
    Arguments:
    Input (list or ndarray): Input data for the model (single vector or single matrix).
    model_name (str): Name of the model.
-   model_path (str): Path where the model is saved.
    Returns:
    ndarray: Output from the model.
    """
    W, df = load_model(model_name, model_path)
+
+   activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
 
-
+   if activation_potentiation != 'nan':
 
-
+       activation_potentiation = float(activation_potentiation)
+
+   else:
+
+       activation_potentiation = None
+
+   try:
+
+       scaler_params = df['STANDARD SCALER'].tolist()
+
+
+       scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
 
        Input = standard_scaler(None, Input, scaler_params)
+
+   except:
+
+       non_scaled = True
+
 
    layers = ['fex', 'cat']
 
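
The `str(...) != 'nan'` check exists because a model saved with `activation_potentiation=None` typically comes back from disk as a float NaN in the metadata DataFrame. That round trip can be reproduced in isolation (illustrative, plain pandas):

import numpy as np
import pandas as pd

# After a save/load cycle a None parameter usually reads back as NaN:
df = pd.DataFrame({'ACTIVATION POTENTIATION': [np.nan]})
val = str(df['ACTIVATION POTENTIATION'].iloc[0])              # 'nan'
activation_potentiation = float(val) if val != 'nan' else None
print(activation_potentiation)                                # None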
@@ -813,7 +990,7 @@ def predict_model_ssd(Input, model_name, model_path):
            neural_layer = normalization(neural_layer)
 
            if Layer == 'fex':
-               neural_layer = fex(neural_layer, W[index], False, None)
+               neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
            elif Layer == 'cat':
                neural_layer = np.dot(W[index], neural_layer)
    except:
@@ -825,21 +1002,22 @@ def predict_model_ssd(Input, model_name, model_path):
    return neural_layer
 
 
-def predict_model_ram(Input, scaler_params, W):
+def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
 
    infopredict_model_ram = """
-   Function to make a prediction using a divided
+   Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
    from weights and parameters stored in memory.
 
    Arguments:
    Input (list or ndarray): Input data for the model (single vector or single matrix).
    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
    W (list of ndarrays): Weights of the model.
+   activation_potentiation (float or None): Threshold value for comparison. (optional)
 
    Returns:
    ndarray: Output from the model.
    """
-
+
    if scaler_params != None:
 
        Input = standard_scaler(None, Input, scaler_params)
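
In-memory prediction with the new keyword then looks roughly like this (illustrative; `W` is the weight list returned by `fit`, and, as the `multiple_evaluate` error message above warns, the threshold must match the one used at training time):

import numpy as np
from plan import fit, predict_model_ram

x = [np.random.rand(16) for _ in range(100)]
y = [[1, 0] if i % 2 == 0 else [0, 1] for i in range(100)]

W = fit(x, y, show_training=None, activation_potentiation=0.5)
out = predict_model_ram(np.array(x[0]), scaler_params=None, W=W,
                        activation_potentiation=0.5)  # must match training
pred = int(np.argmax(out))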
@@ -858,7 +1036,7 @@ def predict_model_ram(Input, scaler_params, W):
            neural_layer = normalization(neural_layer)
 
            if Layer == 'fex':
-               neural_layer = fex(neural_layer, W[index], False, None)
+               neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
            elif Layer == 'cat':
                neural_layer = np.dot(W[index], neural_layer)
 
@@ -987,35 +1165,44 @@ def standard_scaler(x_train, x_test, scaler_params=None):
    """
    try:
 
-
-
-       mean = np.mean(x_train, axis=0)
-       std = np.std(x_train, axis=0)
-       train_data_scaled = (x_train - mean) / std
-       test_data_scaled = (x_test - mean) / std
-
-       scaler_params = [mean, std]
-
-       return scaler_params, train_data_scaled, test_data_scaled
+       if scaler_params == None and x_test != None:
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+           mean = np.mean(x_train, axis=0)
+           std = np.std(x_train, axis=0)
+           train_data_scaled = (x_train - mean) / std
+           test_data_scaled = (x_test - mean) / std
+
+           train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+           test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+           scaler_params = [mean, std]
+
+           return scaler_params, train_data_scaled, test_data_scaled
+
+       if scaler_params == None and x_test == None:
+
+           mean = np.mean(x_train, axis=0)
+           std = np.std(x_train, axis=0)
+           train_data_scaled = (x_train - mean) / std
+
+           train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+
+           scaler_params = [mean, std]
+
+           return scaler_params, train_data_scaled
+
+       if scaler_params != None:
+
+           test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+           test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+           return test_data_scaled
 
    except:
        print(
            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
 
+
 def encode_one_hot(y_train, y_test):
    info_one_hot_encode = """
    Performs one-hot encoding on y_train and y_test data..
pyerualjetwork-2.6.3/pyerualjetwork.egg-info/PKG-INFO
@@ -0,0 +1,7 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.6.3
+Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.6.3/pyerualjetwork.egg-info/top_level.txt
@@ -0,0 +1 @@
+plan
|