pyerualjetwork 2.3.0__tar.gz → 2.5.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-2.3.0 → pyerualjetwork-2.5.9}/PKG-INFO +2 -2
- pyerualjetwork-2.5.9/plan/__init__.py +5 -0
- pyerualjetwork-2.3.0/plan_di/plan_di.py → pyerualjetwork-2.5.9/plan/plan.py +328 -59
- {pyerualjetwork-2.3.0 → pyerualjetwork-2.5.9}/pyerualjetwork.egg-info/PKG-INFO +2 -2
- {pyerualjetwork-2.3.0 → pyerualjetwork-2.5.9}/pyerualjetwork.egg-info/SOURCES.txt +2 -4
- pyerualjetwork-2.5.9/pyerualjetwork.egg-info/top_level.txt +1 -0
- {pyerualjetwork-2.3.0 → pyerualjetwork-2.5.9}/setup.py +2 -2
- pyerualjetwork-2.3.0/plan_bi/__init__.py +0 -5
- pyerualjetwork-2.3.0/plan_bi/plan_bi.py +0 -1458
- pyerualjetwork-2.3.0/plan_di/__init__.py +0 -5
- pyerualjetwork-2.3.0/pyerualjetwork.egg-info/top_level.txt +0 -2
- {pyerualjetwork-2.3.0 → pyerualjetwork-2.5.9}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.3.0 → pyerualjetwork-2.5.9}/setup.cfg +0 -0
--- pyerualjetwork-2.3.0/PKG-INFO
+++ pyerualjetwork-2.5.9/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.3.0
-Summary:
+Version: 2.5.9
+Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,pruning learning artficial neural networks
--- /dev/null
+++ pyerualjetwork-2.5.9/plan/__init__.py
@@ -0,0 +1,5 @@
+# pyerualjetwork/PLAN/__init__.py
+
+# This file is the main entry point of the plan module.
+
+from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization
--- pyerualjetwork-2.3.0/plan_di/plan_di.py
+++ pyerualjetwork-2.5.9/plan/plan.py
@@ -1,4 +1,24 @@
 
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jun 18 23:32:16 2024
+
+@author: hasan
+"""
+
+# optic
+
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jun 21 05:21:35 2024
+
+@author: hasan
+"""
+
+# -*- coding: utf-8 -*-
+
+
 """
 Created on Thu Jun 12 00:00:00 2024
 
@@ -20,8 +40,14 @@ import seaborn as sns
 
 def fit(
     x_train: List[Union[int, float]],
-    y_train: List[Union[int, float]], # At least two.. and one hot encoded
-
+    y_train: List[Union[int, float]], # At least two.. and one hot encoded
+    show_training,
+    show_count= None,
+    val= None,
+    val_count= None,
+    x_val= None,
+    y_val= None,
+    activation_potential=None # (float): Input activation potential (optional)
 ) -> str:
 
     infoPLAN = """
@@ -29,19 +55,50 @@ def fit(
 
     Args:
         x_train (list[num]): List of input data.
-        y_train (list[num]): List of
-
-
+        y_train (list[num]): List of target labels. (one hot encoded)
+        show_training (bool, str): True, None or'final'
+        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
+        val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
+        x_val (list[num]): List of validation data. (optional) Default: x_train
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
+        activation_potential (float): Input activation potential (for binary injection) (optional) in range: -1, 1
     Returns:
         list([num]): (Weight matrices list, train_predictions list, Train_acc).
         error handled ?: Process status ('e')
     """
+
 
     if len(x_train) != len(y_train):
+
         print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
         return 'e'
+
+    if x_val == None and y_val == None:
+
+        x_val = x_train
+        y_val = y_train
+
+    if val == True and val_count == None:
 
+        val_count = 0.1
+        val_count_copy = val_count
+
+    if val == True:
+
+        val_count = int(len(x_train) * val_count)
+
+        val_count_copy = val_count
+
+    if show_training == True or show_training == 'final' and show_count == None:
+
+        show_count = 10
+
+    if show_training == True or show_training == 'final':
+
+        row, col = shape_control(x_train)
+
     class_count = set()
+
     for sublist in y_train:
 
         class_count.add(tuple(sublist))
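
Read together with the hunk above, a training call that exercises the new validation options might look like this (a minimal sketch against the 2.5.9 signature; the toy dataset and every value in it are invented, not taken from the package's documentation):

    import numpy as np
    from plan import fit

    # Invented toy data: 20 samples, 16 features, 2 one-hot classes
    x_train = [np.random.rand(16) for _ in range(20)]
    y_train = [[1.0, 0.0] if i % 2 == 0 else [0.0, 1.0] for i in range(20)]

    # show_training=None skips the live weight plots; val=True runs a
    # validation pass every 10% of the data (the new val_count default of 0.1)
    # and, since x_val/y_val stay None, validates on the training data itself
    trained_W = fit(x_train, y_train, None, val=True)
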
@@ -52,6 +109,7 @@ def fit(
 
     neurons = [len(class_count), len(class_count)]
     layers = ['fex']
+    val_list = [None]
 
     x_train[0] = np.array(x_train[0])
     x_train[0] = x_train[0].ravel()
@@ -64,7 +122,11 @@ def fit(
     print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
     start_time = time.time()
     y = decode_one_hot(y_train)
+
     for index, inp in enumerate(x_train):
+
+        progress = index / len(x_train) * 100
+
         uni_start_time = time.time()
         inp = np.array(inp)
         inp = inp.ravel()
@@ -81,11 +143,93 @@ def fit(
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                W[Lindex] = fex(neural_layer, W[Lindex], True, y[index])
+                W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
 
         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
 
+
+        if val == True:
+
+            if index == val_count:
+
+                val_count += val_count_copy
+
+                layers.append('cat')
+                trained_W.append(np.eye(len(class_count)))
+
+                validation_model = evaluate(x_val, y_val, None, trained_W)
+
+                layers.pop()
+                trained_W.pop()
+
+                val_acc = validation_model[get_acc()]
+
+                val_list.append(val_acc)
+
+
+                plt.plot(val_list, linestyle='-',
+                         color='r')
+
+                progress_status = f"{progress:.1f}"
+                plt.title('Validation accuracy graph. Amount of data learned by the model: % ' + progress_status)
+                plt.xlabel('Learning Progress')
+                plt.ylabel('Accuracy')
+                plt.ylim(0, 1)
+                plt.draw()
+                plt.pause(0.1)
+
+        if show_training == True:
+
+            if index %show_count == 0:
+
+
+                if index != 0:
+                    plt.close(fig)
+
+                if row != 0:
+
+                    fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+
+                else:
+
+                    fig, ax = plt.subplots(1, 1, figsize=(18, 14))
+
+                for j in range(len(class_count)):
+
+
+                    if row != 0:
+
+                        mat = trained_W[0][j,:].reshape(row, col)
+                        suptitle_info = 'Neurons Learning Progress: % '
+                        title_info = f'{j+1}. Neuron'
+
+                        mat = trained_W[0][j,:].reshape(row, col)
+
+                        ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+                        ax[j].set_aspect('equal')
+
+                        ax[j].set_xticks([])
+                        ax[j].set_yticks([])
+                        ax[j].set_title(title_info)
+
+                    else:
+
+                        mat = trained_W[0]
+                        ax.imshow(mat, interpolation='sinc', cmap='viridis')
+                        suptitle_info = 'Weight Learning Progress: % '
+                        title_info = 'Weight Matrix Of Fex Layer'
+
+
+
+
+                progress_status = f"{progress:.1f}"
+                fig.suptitle(suptitle_info + progress_status)
+                plt.draw()
+                plt.pause(0.1)
+
+
         W = weight_identification(
             len(layers) - 1, len(class_count), neurons, x_train_size)
 
@@ -105,6 +249,27 @@ def fit(
 
     print('\rTraining: ', index, "/", len(x_train), "\n", end="")
 
+    if show_training == 'final':
+
+        fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+
+        for j in range(len(class_count)):
+
+            mat = trained_W[0][j,:].reshape(row, col)
+
+            ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+            ax[j].set_aspect('equal')
+
+            ax[j].set_xticks([])
+            ax[j].set_yticks([])
+            ax[j].set_title(f'{j+1}. Neuron')
+
+        progress_status = f"{progress:.1f}"
+        fig.suptitle('Neurons Learning Progress: % ' + progress_status)
+        plt.draw()
+        plt.pause(0.1)
+
+
     EndTime = time.time()
 
     calculating_est = round(EndTime - start_time, 2)
@@ -122,20 +287,65 @@ def fit(
 
     layers.append('cat')
     trained_W.append(np.eye(len(class_count)))
-
-    for i in range(len(class_count)):
-
-        for j in range(len(layers)):
-
-            if layers[j] == 'fex':
-
-                trained_W[j][i,:] = normalization(trained_W[j][i,:])
 
-
+
     return trained_W
 
 # FUNCTIONS -----
 
+def shape_control(x_train):
+
+    try:
+        row = x_train[1].shape[0]
+        col = x_train[1].shape[1]
+
+    except:
+
+        print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
+
+        try:
+            row, col = find_numbers(len(x_train[0]))
+
+        except:
+
+            print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
+            return [0, 0]
+
+    return row, col
+
+def find_numbers(n):
+    if n <= 1:
+        raise ValueError("Parameter 'n' must be greater than 1.")
+
+    for i in range(2, int(n**0.5) + 1):
+        if n % i == 0:
+            factor1 = i
+            factor2 = n // i
+            if factor1 == factor2:
+                return factor1, factor2
+
+    return None
+
+def weight_normalization(
+    W,
+    class_count
+    ) -> str:
+    """
+    Row(Neuron) based normalization. For unbalanced models.
+
+    Args:
+        W (list(num)): Trained weight matrix list.
+        class_count (int): Class count of model.
+
+    Returns:
+        list([numpy_arrays],[...]): posttrained weight matices of the model. .
+    """
+
+    for i in range(class_count):
+
+        W[0][i,:] = normalization(W[0][i,:])
+
+    return W
 
 def weight_identification(
     layer_count, # int: Number of layers in the neural network.
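
The new weight_normalization helper is a post-training step; a minimal sketch (the weight list below is an invented stand-in for what fit returns, with one fex matrix whose row count equals the class count):

    import numpy as np
    from plan import weight_normalization

    W = [np.random.rand(2, 16)]      # placeholder: 2-class fex weight matrix
    W = weight_normalization(W, 2)   # each row (neuron) is normalized on its own
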
@@ -170,7 +380,8 @@ def fex(
     Input, # list[num]: Input data.
     w, # num: Weight matrix of the neural network.
     is_training, # bool: Flag indicating if the function is called during training (True or False).
-    Class # int: Which class is, if training.
+    Class, # int: Which class is, if training.
+    activation_potential # float or None: Input activation potential (optional)
 ) -> tuple:
     """
     Applies feature extraction process to the input data using synaptic pruning.
@@ -180,22 +391,42 @@ def fex(
         w (num): Weight matrix of the neural network.
         is_training (bool): Flag indicating if the function is called during training (True or False).
         Class (int): if is during training then which class(label) ? is isnt then put None.
+        activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
         tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
 
-    if is_training == True:
-
+    if is_training == True and activation_potential == None:
+
         w[Class, :] = Input
 
         return w
 
-
-
-
-
-
+    elif is_training == True and activation_potential != None:
+
+
+        Input[Input < activation_potential] = 0
+        Input[Input > activation_potential] = 1
+
+        w[Class,:] = Input
+
+        return w
+
+    elif is_training == False and activation_potential == None:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
+
+    elif is_training == False and activation_potential != None:
+
+        Input[Input < activation_potential] = 0
+        Input[Input > activation_potential] = 1
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
 
 
 
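
In inference mode the new activation_potential argument acts as a hard binarizer applied to the input before the dot product; a small sketch of that behavior (the numbers are invented):

    import numpy as np
    from plan import fex

    w = np.eye(3)                      # identity weights for 3 classes
    x = np.array([0.2, 0.5, 0.9])
    out = fex(x, w, False, None, 0.4)  # x is thresholded in place to [0., 1., 1.]
    print(out)                         # -> [0. 1. 1.]

Note that the comparisons are strict (< and >), so a value exactly equal to the threshold passes through unchanged, and the input array is modified in place.
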
@@ -272,7 +503,8 @@ def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): (True or False)
-    W
+    W, # list[num]: Weight matrix list of the neural network.
+    activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -282,6 +514,7 @@ def evaluate(
         y_test (list[num]): Test labels.
         show_metrices (bool): (True or False)
         W (list[num]): Weight matrix list of the neural network.
+        activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
         tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -312,7 +545,7 @@ def evaluate(
                 neural_layer = normalization(neural_layer)
 
                 if Layer == 'fex':
-                    neural_layer = fex(neural_layer, W[index], False, None)
+                    neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
                 elif Layer == 'cat':
                     neural_layer = np.dot(W[index], neural_layer)
 
@@ -387,7 +620,8 @@ def multiple_evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
-    MW # list[list[num]]: Weight matrix of the neural network.
+    MW, # list[list[num]]: Weight matrix of the neural network.
+    activation_potential=None # (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -434,7 +668,7 @@ def multiple_evaluate(
                 neural_layer = normalization(neural_layer)
 
                 if Layer == 'fex':
-                    neural_layer = fex(neural_layer, W[index], False, None)
+                    neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
                 elif Layer == 'cat':
                     neural_layer = np.dot(W[index], neural_layer)
 
@@ -518,7 +752,8 @@ def save_model(model_name,
                weights_format,
                model_path,
                scaler_params,
-               W
+               W,
+               activation_potential=None
                ):
 
     infosave_model = """
@@ -534,6 +769,7 @@ def save_model(model_name,
         model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
         scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
         W: Weights of the model.
+        activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
         str: Message indicating if the model was saved successfully or encountered an error.
@@ -582,7 +818,8 @@ def save_model(model_name,
             'WEIGHTS TYPE': weights_type,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
-            'STANDARD SCALER': scaler_params
+            'STANDARD SCALER': scaler_params,
+            'ACTIVATION POTENTIAL': activation_potential
             }
     try:
 
@@ -723,17 +960,34 @@ def predict_model_ssd(Input, model_name, model_path):
     Arguments:
         Input (list or ndarray): Input data for the model (single vector or single matrix).
         model_name (str): Name of the model.
-        model_path (str): Path where the model is saved.
     Returns:
         ndarray: Output from the model.
     """
     W, df = load_model(model_name, model_path)
+
+    activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
 
-
+    if activation_potential != 'nan':
 
-
+        activation_potential = float(activation_potential)
+
+    else:
+
+        activation_potential = None
+
+    try:
+
+        scaler_params = df['STANDARD SCALER'].tolist()
+
+
+        scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
 
-    Input = standard_scaler(
+        Input = standard_scaler(None, Input, scaler_params)
+
+    except:
+
+        non_scaled = True
+
 
     layers = ['fex', 'cat']
 
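
Since save_model now writes an 'ACTIVATION POTENTIAL' column into the model dataframe, predict_model_ssd can restore the threshold with no new arguments; a hedged sketch (the model name and path are placeholders for a model previously written by save_model):

    import numpy as np
    from plan import predict_model_ssd

    sample = np.random.rand(16)    # placeholder input vector
    # the stored threshold (or NaN, mapped back to None) is applied automatically
    output = predict_model_ssd(sample, 'my_model', 'model_dir/')
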
@@ -749,7 +1003,7 @@ def predict_model_ssd(Input, model_name, model_path):
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
     except:
@@ -761,7 +1015,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, scaler_params, W):
+def predict_model_ram(Input, scaler_params, W, activation_potential=None):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -771,14 +1025,15 @@ def predict_model_ram(Input, scaler_params, W):
         Input (list or ndarray): Input data for the model (single vector or single matrix).
         scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
         W (list of ndarrays): Weights of the model.
+        activation_potential (float or None): Threshold value for comparison. (optional)
 
     Returns:
         ndarray: Output from the model.
     """
-
+
     if scaler_params != None:
 
-        Input = standard_scaler(
+        Input = standard_scaler(None, Input, scaler_params)
 
     layers = ['fex', 'cat']
 
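
The in-memory variant takes the threshold explicitly instead of reading it from disk; a sketch (the weight list is an invented placeholder shaped like a 2-class model, and scaler_params=None skips scaling):

    import numpy as np
    from plan import predict_model_ram

    W = [np.random.rand(2, 16), np.eye(2)]   # placeholder fex + cat weights
    sample = np.random.rand(16)
    output = predict_model_ram(sample, None, W, activation_potential=0.5)
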
@@ -794,7 +1049,7 @@ def predict_model_ram(Input, scaler_params, W):
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
 
@@ -921,26 +1176,40 @@ def standard_scaler(x_train, x_test, scaler_params=None):
         tuple
         Standardized training and test datasets
     """
-
+    try:
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if scaler_params == None and x_test != None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+
+            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled, test_data_scaled
+
+        if scaler_params == None and x_test == None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+
+            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled
+
+        if scaler_params != None:
+
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            return test_data_scaled
 
     except:
         print(
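
The rewritten standard_scaler now has three call shapes, selected by which arguments are None; a sketch of all three (the data is invented, and plain lists are used because the function compares its inputs against None):

    from plan import standard_scaler

    x_train = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
    x_test = [[2.0, 2.0]]

    # 1) fit on the training data, scale both sets, keep the params for reuse
    scaler_params, train_s, test_s = standard_scaler(x_train, x_test)

    # 2) training data only
    scaler_params, train_s = standard_scaler(x_train, None)

    # 3) reuse stored params on new data, as predict_model_ram/_ssd now do
    new_s = standard_scaler(None, x_test, scaler_params)
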
@@ -1293,7 +1562,7 @@ def plot_evaluate(y_test, y_preds, acc_list):
     axs[0, 1].set_ylim(0, 1)  # limit the y-axis to the range 0 to 1
     axs[0, 1].set_xlabel('Metrics')
     axs[0, 1].set_ylabel('Score')
-    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
     # Accuracy
@@ -1426,4 +1695,4 @@ def get_preds():
 
 def get_acc():
 
-    return 2
+    return 2
--- pyerualjetwork-2.3.0/pyerualjetwork.egg-info/PKG-INFO
+++ pyerualjetwork-2.5.9/pyerualjetwork.egg-info/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.3.0
-Summary:
+Version: 2.5.9
+Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classifcation,pruning learning artficial neural networks
--- /dev/null
+++ pyerualjetwork-2.5.9/pyerualjetwork.egg-info/top_level.txt
@@ -0,0 +1 @@
+plan
--- pyerualjetwork-2.3.0/setup.py
+++ pyerualjetwork-2.5.9/setup.py
@@ -5,10 +5,10 @@ from setuptools import setup, find_packages
 setup(
 
     name = "pyerualjetwork",
-    version = "2.3.0",
+    version = "2.5.9",
    author = "Hasan Can Beydili",
    author_email = "tchasancan@gmail.com",
-    description= "
+    description= "New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document",
    packages = find_packages(),
    keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks'],
 
--- pyerualjetwork-2.3.0/plan_bi/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# pyerualjetwork/PLAN/__init__.py
-
-# This file is the main entry point of the plan module.
-
-from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer