pyerualjetwork 1.1.7__py3-none-any.whl → 2.5.8__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- plan/__init__.py +1 -1
- plan/plan.py +1434 -790
- pyerualjetwork-2.5.8.dist-info/METADATA +8 -0
- pyerualjetwork-2.5.8.dist-info/RECORD +6 -0
- {pyerualjetwork-1.1.7.dist-info → pyerualjetwork-2.5.8.dist-info}/WHEEL +1 -1
- pyerualjetwork-1.1.7.dist-info/METADATA +0 -8
- pyerualjetwork-1.1.7.dist-info/RECORD +0 -6
- {pyerualjetwork-1.1.7.dist-info → pyerualjetwork-2.5.8.dist-info}/top_level.txt +0 -0
plan/plan.py
CHANGED
@@ -1,1054 +1,1698 @@
|
|
1
|
+
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
"""
|
4
|
+
Created on Tue Jun 18 23:32:16 2024
|
5
|
+
|
6
|
+
@author: hasan
|
7
|
+
"""
|
8
|
+
|
9
|
+
# optic
|
10
|
+
|
11
|
+
# -*- coding: utf-8 -*-
|
12
|
+
"""
|
13
|
+
Created on Fri Jun 21 05:21:35 2024
|
14
|
+
|
15
|
+
@author: hasan
|
16
|
+
"""
|
17
|
+
|
18
|
+
# -*- coding: utf-8 -*-
|
19
|
+
|
20
|
+
|
21
|
+
|
22
|
+
"""
|
23
|
+
Created on Thu Jun 12 00:00:00 2024
|
24
|
+
|
25
|
+
@author: hasan can beydili
|
26
|
+
"""
|
27
|
+
|
28
|
+
import pandas as pd
|
1
29
|
import numpy as np
|
2
30
|
import time
|
3
|
-
from colorama import Fore,Style
|
31
|
+
from colorama import Fore, Style
|
4
32
|
from typing import List, Union
|
5
33
|
import math
|
34
|
+
from scipy.special import expit, softmax
|
35
|
+
import matplotlib.pyplot as plt
|
36
|
+
import seaborn as sns
|
37
|
+
|
6
38
|
# BUILD -----
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
|
12
|
-
|
13
|
-
|
14
|
-
|
15
|
-
|
16
|
-
|
39
|
+
|
40
|
+
|
41
|
+
def fit(
|
42
|
+
x_train: List[Union[int, float]],
|
43
|
+
y_train: List[Union[int, float]], # At least two.. and one hot encoded
|
44
|
+
show_training,
|
45
|
+
show_count= None,
|
46
|
+
val_count= None,
|
47
|
+
val= None,
|
48
|
+
x_val= None,
|
49
|
+
y_val= None,
|
50
|
+
activation_potential=None # (float): Input activation potential (optional)
|
17
51
|
) -> str:
|
18
|
-
|
52
|
+
|
19
53
|
infoPLAN = """
|
20
54
|
Creates and configures a PLAN model.
|
21
55
|
|
22
56
|
Args:
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
30
|
-
|
31
|
-
Activations (list[str]): List of activation functions.
|
32
|
-
|
57
|
+
x_train (list[num]): List of input data.
|
58
|
+
y_train (list[num]): List of target labels. (one hot encoded)
|
59
|
+
show_training (bool, str): True, None or'final'
|
60
|
+
show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
|
61
|
+
val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
|
62
|
+
x_val (list[num]): List of validation data. (optional) Default: x_train
|
63
|
+
y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
|
64
|
+
activation_potential (float): Input activation potential (for binary injection) (optional) in range: -1, 1
|
33
65
|
Returns:
|
34
|
-
list([num]): (Weight matrices list,
|
66
|
+
list([num]): (Weight matrices list, train_predictions list, Train_acc).
|
35
67
|
error handled ?: Process status ('e')
|
36
68
|
"""
|
37
|
-
|
38
|
-
LastNeuron = Neurons[-1:][0]
|
39
|
-
if LastNeuron != ClassCount:
|
40
|
-
print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
|
41
|
-
return 'e'
|
42
|
-
|
43
|
-
if len(Normalizations) != len(ThresholdValues):
|
44
|
-
|
45
|
-
print(Fore.RED + "ERROR307: Normalization list length must be equal to length of ThresholdSigns List,ThresholdValues List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
|
46
|
-
return 'e'
|
47
|
-
|
48
|
-
if len(Inputs) != len(Labels):
|
49
|
-
print(Fore.RED + "ERROR301: Inputs list and Labels list must be same length.",infoPLAN)
|
50
|
-
return 'e'
|
51
|
-
|
52
|
-
for i, Value in enumerate(ThresholdValues):
|
53
|
-
|
54
|
-
if Normalizations[i] != 'y' and Normalizations[i] != 'n':
|
55
|
-
print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
|
56
|
-
return 'e'
|
57
|
-
|
58
|
-
if ThresholdSigns[i] == 'none':
|
59
|
-
print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
|
60
|
-
time.sleep(3)
|
61
|
-
|
62
|
-
if isinstance(Value, str):
|
63
|
-
print(Fore.RED + "ERROR201: Threshold values must be numeric. from: TrainPLAN")
|
64
|
-
return 'e'
|
65
|
-
|
66
|
-
if isinstance(Neurons[i], str):
|
67
|
-
print(Fore.RED + "ERROR202: Neurons list must be numeric.")
|
68
|
-
return 'e'
|
69
|
-
|
70
|
-
if len(ThresholdSigns) != len(ThresholdValues):
|
71
|
-
print(Fore.RED + "ERROR302: Threshold signs list and Threshold Values list must be same length. from: TrainPLAN",infoPLAN)
|
72
|
-
return 'e'
|
73
|
-
|
74
|
-
if len(Layers) != len(Neurons):
|
75
|
-
print(Fore.RED + "ERROR303: Layers list and Neurons list must same length. from: TrainPLAN",infoPLAN)
|
76
|
-
return 'e'
|
77
|
-
|
78
|
-
if len(ThresholdValues) != len(Layers) or len(ThresholdSigns) != len(Layers):
|
79
|
-
print(Fore.RED + "ERROR306: Threshold Values and Threshold Signs lists length must be same Layers list length. from: TrainPLAN",infoPLAN)
|
80
|
-
return 'e'
|
81
|
-
|
82
|
-
|
83
|
-
for Activation in Activations:
|
84
|
-
if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
|
85
|
-
print(Fore.RED + "ERROR108: Activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
|
86
|
-
return 'e'
|
87
|
-
|
69
|
+
|
88
70
|
|
89
|
-
|
90
|
-
|
91
|
-
|
92
|
-
return 'e'
|
93
|
-
|
94
|
-
if index + 1 != len(Neurons) and Neuron % 2 != 0:
|
95
|
-
print(Fore.MAGENTA + "WARNING101: We strongly advise to do Neuron counts be should even numbers. from: TrainPLAN",infoPLAN)
|
96
|
-
time.sleep(3)
|
97
|
-
|
98
|
-
if Neuron < ClassCount:
|
99
|
-
print(Fore.RED + "ERROR102: Neuron count must be greater than class count(For PLAN). from: TrainPLAN")
|
100
|
-
return 'e'
|
101
|
-
|
102
|
-
if Layers[index] != 'fex' and Layers[index] != 'cat':
|
103
|
-
print(Fore.RED + "ERROR107: Layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
|
104
|
-
return 'e'
|
105
|
-
|
106
|
-
if len(ThresholdSigns) != len(ThresholdValues):
|
107
|
-
print(Fore.RED + "ERROR305: Threshold signs list and Threshold values list must be same length. from: TrainPLAN",infoPLAN)
|
71
|
+
if len(x_train) != len(y_train):
|
72
|
+
|
73
|
+
print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
|
108
74
|
return 'e'
|
109
75
|
|
110
|
-
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
76
|
+
if x_val == None and y_val == None:
|
77
|
+
|
78
|
+
x_val = x_train
|
79
|
+
y_val = y_train
|
80
|
+
|
81
|
+
if val == True and val_count == None:
|
82
|
+
|
83
|
+
val_count = 0.1
|
84
|
+
val_count_copy = val_count
|
85
|
+
|
86
|
+
if val == True:
|
115
87
|
|
116
|
-
|
117
|
-
|
118
|
-
|
88
|
+
val_count = int(len(x_train) * val_count)
|
89
|
+
|
90
|
+
val_count_copy = val_count
|
91
|
+
|
92
|
+
if show_training == True or show_training == 'final' and show_count == None:
|
119
93
|
|
120
|
-
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
UniqueLabels = list(UniqueLabels)
|
127
|
-
|
128
|
-
Labels = [tuple(sublist) for sublist in Labels]
|
129
|
-
|
130
|
-
|
131
|
-
if len(UniqueLabels) != ClassCount:
|
132
|
-
print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
|
133
|
-
return 'e'
|
134
|
-
|
135
|
-
Inputs[0] = np.array(Inputs[0])
|
136
|
-
Inputs[0] = Inputs[0].ravel()
|
137
|
-
InputSize = len(Inputs[0])
|
94
|
+
show_count = 10
|
95
|
+
|
96
|
+
if show_training == True or show_training == 'final':
|
97
|
+
|
98
|
+
row, col = shape_control(x_train)
|
138
99
|
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
for
|
148
|
-
|
100
|
+
class_count = set()
|
101
|
+
|
102
|
+
for sublist in y_train:
|
103
|
+
|
104
|
+
class_count.add(tuple(sublist))
|
105
|
+
|
106
|
+
class_count = list(class_count)
|
107
|
+
|
108
|
+
y_train = [tuple(sublist) for sublist in y_train]
|
109
|
+
|
110
|
+
neurons = [len(class_count), len(class_count)]
|
111
|
+
layers = ['fex']
|
112
|
+
val_list = [None]
|
113
|
+
|
114
|
+
x_train[0] = np.array(x_train[0])
|
115
|
+
x_train[0] = x_train[0].ravel()
|
116
|
+
x_train_size = len(x_train[0])
|
117
|
+
|
118
|
+
W = weight_identification(
|
119
|
+
len(layers) - 1, len(class_count), neurons, x_train_size)
|
120
|
+
|
121
|
+
trained_W = [1] * len(W)
|
122
|
+
print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
|
123
|
+
start_time = time.time()
|
124
|
+
y = decode_one_hot(y_train)
|
125
|
+
|
126
|
+
for index, inp in enumerate(x_train):
|
127
|
+
|
128
|
+
progress = index / len(x_train) * 100
|
129
|
+
|
130
|
+
uni_start_time = time.time()
|
149
131
|
inp = np.array(inp)
|
150
132
|
inp = inp.ravel()
|
151
133
|
|
152
|
-
if
|
153
|
-
print(Fore.RED +"ERROR304: All input matrices or vectors in
|
134
|
+
if x_train_size != len(inp):
|
135
|
+
print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
|
136
|
+
infoPLAN + Style.RESET_ALL)
|
154
137
|
return 'e'
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
NeuralLayer = inp
|
169
|
-
|
170
|
-
for Lindex, Layer in enumerate(Layers):
|
138
|
+
|
139
|
+
neural_layer = inp
|
140
|
+
|
141
|
+
for Lindex, Layer in enumerate(layers):
|
142
|
+
|
143
|
+
neural_layer = normalization(neural_layer)
|
144
|
+
|
145
|
+
if Layer == 'fex':
|
146
|
+
W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
|
147
|
+
|
148
|
+
for i, w in enumerate(W):
|
149
|
+
trained_W[i] = trained_W[i] + w
|
150
|
+
|
171
151
|
|
172
|
-
|
173
|
-
|
152
|
+
if val == True:
|
153
|
+
|
154
|
+
if index == val_count:
|
174
155
|
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
156
|
+
val_count += val_count_copy
|
157
|
+
|
158
|
+
layers.append('cat')
|
159
|
+
trained_W.append(np.eye(len(class_count)))
|
160
|
+
|
161
|
+
validation_model = evaluate(x_val, y_val, None, trained_W)
|
162
|
+
|
163
|
+
layers.pop()
|
164
|
+
trained_W.pop()
|
165
|
+
|
166
|
+
val_acc = validation_model[get_acc()]
|
167
|
+
|
168
|
+
val_list.append(val_acc)
|
169
|
+
|
181
170
|
|
182
|
-
|
183
|
-
|
184
|
-
elif Layer == 'cat':
|
185
|
-
NeuralLayer,W[Lindex] = Cat(NeuralLayer, W[Lindex], ThresholdSigns[Lindex], ThresholdValues[Lindex],1)
|
171
|
+
plt.plot(val_list, linestyle='-',
|
172
|
+
color='r')
|
186
173
|
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
174
|
+
progress_status = f"{progress:.1f}"
|
175
|
+
plt.title('Validation accuracy graph. Amount of data learned by the model: % ' + progress_status)
|
176
|
+
plt.xlabel('Learning Progress')
|
177
|
+
plt.ylabel('Accuracy')
|
178
|
+
plt.ylim(0, 1)
|
179
|
+
plt.draw()
|
180
|
+
plt.pause(0.1)
|
181
|
+
|
182
|
+
if show_training == True:
|
183
|
+
|
184
|
+
if index %show_count == 0:
|
185
|
+
|
186
|
+
|
187
|
+
if index != 0:
|
188
|
+
plt.close(fig)
|
189
|
+
|
190
|
+
if row != 0:
|
191
|
+
|
192
|
+
fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
|
193
|
+
|
194
|
+
else:
|
202
195
|
|
203
|
-
|
204
|
-
|
196
|
+
fig, ax = plt.subplots(1, 1, figsize=(18, 14))
|
197
|
+
|
198
|
+
for j in range(len(class_count)):
|
199
|
+
|
200
|
+
|
201
|
+
if row != 0:
|
202
|
+
|
203
|
+
mat = trained_W[0][j,:].reshape(row, col)
|
204
|
+
suptitle_info = 'Neurons Learning Progress: % '
|
205
|
+
title_info = f'{j+1}. Neuron'
|
206
|
+
|
207
|
+
mat = trained_W[0][j,:].reshape(row, col)
|
208
|
+
|
209
|
+
ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
|
210
|
+
|
211
|
+
ax[j].set_aspect('equal')
|
212
|
+
|
213
|
+
ax[j].set_xticks([])
|
214
|
+
ax[j].set_yticks([])
|
215
|
+
ax[j].set_title(title_info)
|
216
|
+
|
217
|
+
else:
|
218
|
+
|
219
|
+
mat = trained_W[0]
|
220
|
+
ax.imshow(mat, interpolation='sinc', cmap='viridis')
|
221
|
+
suptitle_info = 'Weight Learning Progress: % '
|
222
|
+
title_info = 'Weight Matrix Of Fex Layer'
|
223
|
+
|
224
|
+
|
205
225
|
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
226
|
+
|
227
|
+
progress_status = f"{progress:.1f}"
|
228
|
+
fig.suptitle(suptitle_info + progress_status)
|
229
|
+
plt.draw()
|
230
|
+
plt.pause(0.1)
|
231
|
+
|
232
|
+
|
233
|
+
W = weight_identification(
|
234
|
+
len(layers) - 1, len(class_count), neurons, x_train_size)
|
235
|
+
|
236
|
+
uni_end_time = time.time()
|
237
|
+
|
238
|
+
calculating_est = round(
|
239
|
+
(uni_end_time - uni_start_time) * (len(x_train) - index), 3)
|
240
|
+
|
241
|
+
if calculating_est < 60:
|
242
|
+
print('\rest......(sec):', calculating_est, '\n', end="")
|
243
|
+
|
244
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
245
|
+
print('\rest......(min):', calculating_est/60, '\n', end="")
|
246
|
+
|
247
|
+
elif calculating_est > 3600:
|
248
|
+
print('\rest......(h):', calculating_est/3600, '\n', end="")
|
249
|
+
|
250
|
+
print('\rTraining: ', index, "/", len(x_train), "\n", end="")
|
251
|
+
|
252
|
+
if show_training == 'final':
|
253
|
+
|
254
|
+
fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
|
255
|
+
|
256
|
+
for j in range(len(class_count)):
|
257
|
+
|
258
|
+
mat = trained_W[0][j,:].reshape(row, col)
|
259
|
+
|
260
|
+
ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
|
261
|
+
ax[j].set_aspect('equal')
|
262
|
+
|
263
|
+
ax[j].set_xticks([])
|
264
|
+
ax[j].set_yticks([])
|
265
|
+
ax[j].set_title(f'{j+1}. Neuron')
|
266
|
+
|
267
|
+
progress_status = f"{progress:.1f}"
|
268
|
+
fig.suptitle('Neurons Learning Progress: % ' + progress_status)
|
269
|
+
plt.draw()
|
270
|
+
plt.pause(0.1)
|
271
|
+
|
272
|
+
|
222
273
|
EndTime = time.time()
|
274
|
+
|
275
|
+
calculating_est = round(EndTime - start_time, 2)
|
276
|
+
|
277
|
+
print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n" + Style.RESET_ALL)
|
278
|
+
|
279
|
+
if calculating_est < 60:
|
280
|
+
print('Total training time(sec): ', calculating_est)
|
281
|
+
|
282
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
283
|
+
print('Total training time(min): ', calculating_est/60)
|
284
|
+
|
285
|
+
elif calculating_est > 3600:
|
286
|
+
print('Total training time(h): ', calculating_est/3600)
|
223
287
|
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
288
|
+
layers.append('cat')
|
289
|
+
trained_W.append(np.eye(len(class_count)))
|
290
|
+
|
291
|
+
|
292
|
+
return trained_W
|
293
|
+
|
294
|
+
# FUNCTIONS -----
|
295
|
+
|
296
|
+
def shape_control(x_train):
|
297
|
+
|
298
|
+
try:
|
299
|
+
row = x_train[1].shape[0]
|
300
|
+
col = x_train[1].shape[1]
|
230
301
|
|
231
|
-
|
232
|
-
|
302
|
+
except:
|
303
|
+
|
304
|
+
print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
|
233
305
|
|
234
|
-
|
235
|
-
|
306
|
+
try:
|
307
|
+
row, col = find_numbers(len(x_train[0]))
|
308
|
+
|
309
|
+
except:
|
236
310
|
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
elif Acc < 0.8 and Acc > 0.6:
|
241
|
-
print(Fore.MAGENTA + '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
|
242
|
-
|
243
|
-
elif Acc < 0.6:
|
244
|
-
print(Fore.RED+ '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
return TrainedWs,TrainPredictions
|
311
|
+
print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
|
312
|
+
return [0, 0]
|
250
313
|
|
251
|
-
|
252
|
-
|
253
|
-
def WeightIdentification(
|
254
|
-
LayerCount, # int: Number of layers in the neural network.
|
255
|
-
ClassCount, # int: Number of classes in the classification task.
|
256
|
-
Neurons, # list[num]: List of neuron counts for each layer.
|
257
|
-
InputSize # int: Size of the input data.
|
258
|
-
) -> str:
|
259
|
-
"""
|
260
|
-
Identifies the weights for a neural network model.
|
314
|
+
return row, col
|
261
315
|
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
Neurons (list[num]): List of neuron counts for each layer.
|
266
|
-
InputSize (int): Size of the input data.
|
316
|
+
def find_numbers(n):
|
317
|
+
if n <= 1:
|
318
|
+
raise ValueError("Parameter 'n' must be greater than 1.")
|
267
319
|
|
268
|
-
|
269
|
-
|
270
|
-
|
320
|
+
for i in range(2, int(n**0.5) + 1):
|
321
|
+
if n % i == 0:
|
322
|
+
factor1 = i
|
323
|
+
factor2 = n // i
|
324
|
+
if factor1 == factor2:
|
325
|
+
return factor1, factor2
|
271
326
|
|
272
|
-
|
273
|
-
Wlen = LayerCount + 1
|
274
|
-
W = [None] * Wlen
|
275
|
-
W[0] = np.ones((Neurons[0],InputSize))
|
276
|
-
ws = LayerCount - 1
|
277
|
-
for w in range(ws):
|
278
|
-
W[w + 1] = np.ones((Neurons[w + 1],Neurons[w]))
|
279
|
-
W[LayerCount] = np.ones((ClassCount,Neurons[LayerCount - 1]))
|
280
|
-
return W
|
327
|
+
return None
|
281
328
|
|
282
|
-
def
|
283
|
-
|
284
|
-
|
285
|
-
Key, # int: Key for identifying synaptic connections.
|
286
|
-
Class, # int: Class label for the current training instance.
|
287
|
-
ClassCount # int: Total number of classes in the dataset.
|
288
|
-
|
329
|
+
def weight_normalization(
|
330
|
+
W,
|
331
|
+
class_count
|
289
332
|
) -> str:
|
290
|
-
|
291
|
-
|
333
|
+
"""
|
334
|
+
Row(Neuron) based normalization. For unbalanced models.
|
292
335
|
|
293
336
|
Args:
|
294
|
-
|
295
|
-
|
296
|
-
Key (str): Key for identifying synaptic row or col connections.
|
297
|
-
Class (int): Class label for the current training instance.
|
298
|
-
ClassCount (int): Total number of classes in the dataset.
|
337
|
+
W (list(num)): Trained weight matrix list.
|
338
|
+
class_count (int): Class count of model.
|
299
339
|
|
300
340
|
Returns:
|
301
|
-
|
341
|
+
list([numpy_arrays],[...]): posttrained weight matices of the model. .
|
302
342
|
"""
|
303
343
|
|
304
|
-
|
305
|
-
Class += 1 # because index start 0
|
306
|
-
|
307
|
-
if Class != ClassCount and Class != 1:
|
308
|
-
|
309
|
-
Ce = Cs / Class
|
310
|
-
|
311
|
-
w[int(Ce)-1::-1,:] = 0
|
312
|
-
|
313
|
-
w[Cs:,:] = 0
|
314
|
-
|
315
|
-
|
316
|
-
else:
|
344
|
+
for i in range(class_count):
|
317
345
|
|
318
|
-
|
319
|
-
if Key == 'row':
|
320
|
-
|
321
|
-
w[Cs:,:] = 0
|
322
|
-
|
323
|
-
elif Key == 'col':
|
324
|
-
|
325
|
-
w[:,Cs] = 0
|
346
|
+
W[0][i,:] = normalization(W[0][i,:])
|
326
347
|
|
327
|
-
|
328
|
-
print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning)
|
329
|
-
return 'e'
|
330
|
-
else:
|
331
|
-
if Key == 'row':
|
332
|
-
|
333
|
-
w[Cs:,:] = 0
|
334
|
-
|
335
|
-
Ce = int(round(w.shape[0] - Cs / ClassCount))
|
336
|
-
w[Ce-1::-1,:] = 0
|
337
|
-
|
338
|
-
elif Key == 'col':
|
339
|
-
|
340
|
-
w[:,Cs] = 0
|
341
|
-
|
342
|
-
else:
|
343
|
-
print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning + Style.RESET_ALL)
|
344
|
-
return 'e'
|
345
|
-
return w
|
348
|
+
return W
|
346
349
|
|
347
|
-
def
|
348
|
-
|
349
|
-
|
350
|
+
def weight_identification(
|
351
|
+
layer_count, # int: Number of layers in the neural network.
|
352
|
+
class_count, # int: Number of classes in the classification task.
|
353
|
+
neurons, # list[num]: List of neuron counts for each layer.
|
354
|
+
x_train_size # int: Size of the input data.
|
350
355
|
) -> str:
|
351
356
|
"""
|
352
|
-
|
357
|
+
Identifies the weights for a neural network model.
|
353
358
|
|
354
359
|
Args:
|
355
|
-
|
356
|
-
|
360
|
+
layer_count (int): Number of layers in the neural network.
|
361
|
+
class_count (int): Number of classes in the classification task.
|
362
|
+
neurons (list[num]): List of neuron counts for each layer.
|
363
|
+
x_train_size (int): Size of the input data.
|
357
364
|
|
358
365
|
Returns:
|
359
|
-
list:
|
366
|
+
list([numpy_arrays],[...]): pretrained weight matices of the model. .
|
360
367
|
"""
|
361
368
|
|
362
|
-
|
363
|
-
|
364
|
-
|
365
|
-
|
366
|
-
|
367
|
-
|
368
|
-
|
369
|
-
|
370
|
-
|
371
|
-
|
372
|
-
Piece[i] = int(math.floor(W[i].shape[0] / ClassCount))
|
373
|
-
|
374
|
-
Cs = 0
|
375
|
-
# j = Classes, i = Weights, [0] = CutStart.
|
369
|
+
Wlen = layer_count + 1
|
370
|
+
W = [None] * Wlen
|
371
|
+
W[0] = np.ones((neurons[0], x_train_size))
|
372
|
+
ws = layer_count - 1
|
373
|
+
for w in range(ws):
|
374
|
+
W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
|
375
|
+
|
376
|
+
return W
|
376
377
|
|
377
|
-
for i in range(len(W)):
|
378
|
-
for j in range(ClassCount):
|
379
|
-
Cs = Cs + Piece[i]
|
380
|
-
Divides[j][i][0] = Cs
|
381
|
-
#print('Divides: ' + j + i + ' = ' + Divides[j][i][0])
|
382
|
-
#input()
|
383
|
-
|
384
|
-
j = 0
|
385
|
-
Cs = 0
|
386
|
-
|
387
|
-
return Divides
|
388
|
-
|
389
378
|
|
390
|
-
def
|
379
|
+
def fex(
|
391
380
|
Input, # list[num]: Input data.
|
392
|
-
w, #
|
393
|
-
|
394
|
-
|
381
|
+
w, # num: Weight matrix of the neural network.
|
382
|
+
is_training, # bool: Flag indicating if the function is called during training (True or False).
|
383
|
+
Class, # int: Which class is, if training.
|
384
|
+
activation_potential # float or None: Input activation potential (optional)
|
395
385
|
) -> tuple:
|
396
386
|
"""
|
397
387
|
Applies feature extraction process to the input data using synaptic pruning.
|
398
388
|
|
399
389
|
Args:
|
400
|
-
Input (
|
401
|
-
w (
|
402
|
-
|
403
|
-
|
390
|
+
Input (num): Input data.
|
391
|
+
w (num): Weight matrix of the neural network.
|
392
|
+
is_training (bool): Flag indicating if the function is called during training (True or False).
|
393
|
+
Class (int): if is during training then which class(label) ? is isnt then put None.
|
394
|
+
activation_potential (float or None): Threshold value for comparison. (optional)
|
404
395
|
|
405
396
|
Returns:
|
406
397
|
tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
|
407
398
|
"""
|
408
399
|
|
409
|
-
if
|
410
|
-
|
411
|
-
|
412
|
-
PruneIndex = np.where(Input > ThresholdValue)
|
413
|
-
elif ThresholdsSign == '==':
|
414
|
-
PruneIndex = np.where(Input == ThresholdValue)
|
415
|
-
elif ThresholdsSign == '!=':
|
416
|
-
PruneIndex = np.where(Input != ThresholdValue)
|
417
|
-
|
418
|
-
w = SynapticPruning(w, PruneIndex, 'col', 0, 0)
|
419
|
-
|
420
|
-
NeuralLayer = np.dot(w, Input)
|
421
|
-
return NeuralLayer,w
|
422
|
-
|
423
|
-
def Cat(
|
424
|
-
Input, # list[num]: Input data.
|
425
|
-
w, # list[list[num]]: Weight matrix of the neural network.
|
426
|
-
ThresholdsSign, # str: Sign for threshold comparison ('<', '>', '==', '!=').
|
427
|
-
ThresholdValue, # num: Threshold value for comparison.
|
428
|
-
isTrain # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
|
429
|
-
) -> tuple:
|
430
|
-
"""
|
431
|
-
Applies categorization process to the input data using synaptic pruning if specified.
|
432
|
-
|
433
|
-
Args:
|
434
|
-
Input (list[num]): Input data.
|
435
|
-
w (list[list[num]]): Weight matrix of the neural network.
|
436
|
-
ThresholdsSign (str): Sign for threshold comparison ('<', '>', '==', '!=').
|
437
|
-
ThresholdValue (num): Threshold value for comparison.
|
438
|
-
isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
|
400
|
+
if is_training == True and activation_potential == None:
|
401
|
+
|
402
|
+
w[Class, :] = Input
|
439
403
|
|
440
|
-
|
441
|
-
tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
|
442
|
-
"""
|
404
|
+
return w
|
443
405
|
|
444
|
-
|
445
|
-
|
446
|
-
|
447
|
-
|
448
|
-
|
449
|
-
|
450
|
-
|
451
|
-
|
452
|
-
|
453
|
-
|
454
|
-
|
406
|
+
elif is_training == True and activation_potential != None:
|
407
|
+
|
408
|
+
|
409
|
+
Input[Input < activation_potential] = 0
|
410
|
+
Input[Input > activation_potential] = 1
|
411
|
+
|
412
|
+
w[Class,:] = Input
|
413
|
+
|
414
|
+
return w
|
415
|
+
|
416
|
+
elif is_training == False and activation_potential == None:
|
417
|
+
|
418
|
+
neural_layer = np.dot(w, Input)
|
455
419
|
|
456
|
-
|
457
|
-
|
458
|
-
|
420
|
+
return neural_layer
|
421
|
+
|
422
|
+
elif is_training == False and activation_potential != None:
|
423
|
+
|
424
|
+
Input[Input < activation_potential] = 0
|
425
|
+
Input[Input > activation_potential] = 1
|
426
|
+
|
427
|
+
neural_layer = np.dot(w, Input)
|
428
|
+
|
429
|
+
return neural_layer
|
430
|
+
|
459
431
|
|
460
432
|
|
461
|
-
def
|
462
|
-
Input #
|
433
|
+
def normalization(
|
434
|
+
Input # num: Input data to be normalized.
|
463
435
|
):
|
464
436
|
"""
|
465
437
|
Normalizes the input data using maximum absolute scaling.
|
466
438
|
|
467
439
|
Args:
|
468
|
-
Input (
|
440
|
+
Input (num): Input data to be normalized.
|
469
441
|
|
470
442
|
Returns:
|
471
|
-
|
443
|
+
(num) Scaled input data after normalization.
|
472
444
|
"""
|
473
445
|
|
474
|
-
|
475
446
|
AbsVector = np.abs(Input)
|
476
|
-
|
447
|
+
|
477
448
|
MaxAbs = np.max(AbsVector)
|
478
|
-
|
449
|
+
|
479
450
|
ScaledInput = Input / MaxAbs
|
480
|
-
|
451
|
+
|
481
452
|
return ScaledInput
|
482
453
|
|
483
454
|
|
484
455
|
def Softmax(
|
485
|
-
x #
|
456
|
+
x # num: Input data to be transformed using softmax function.
|
486
457
|
):
|
487
458
|
"""
|
488
459
|
Applies the softmax function to the input data.
|
489
460
|
|
490
461
|
Args:
|
491
|
-
|
462
|
+
(num): Input data to be transformed using softmax function.
|
492
463
|
|
493
464
|
Returns:
|
494
|
-
|
465
|
+
(num): Transformed data after applying softmax function.
|
495
466
|
"""
|
496
467
|
|
497
|
-
|
498
|
-
MaxX = np.max(x)
|
499
|
-
|
500
|
-
x -= MaxX
|
501
|
-
|
502
|
-
ExpX = np.exp(x)
|
503
|
-
|
504
|
-
SumExpX = np.sum(ExpX)
|
505
|
-
|
506
|
-
SoftmaxX = ExpX / SumExpX
|
507
|
-
|
508
|
-
return SoftmaxX
|
468
|
+
return softmax(x)
|
509
469
|
|
510
470
|
|
511
471
|
def Sigmoid(
|
512
|
-
x #
|
472
|
+
x # num: Input data to be transformed using sigmoid function.
|
513
473
|
):
|
514
474
|
"""
|
515
475
|
Applies the sigmoid function to the input data.
|
516
476
|
|
517
477
|
Args:
|
518
|
-
|
478
|
+
(num): Input data to be transformed using sigmoid function.
|
519
479
|
|
520
480
|
Returns:
|
521
|
-
|
481
|
+
(num): Transformed data after applying sigmoid function.
|
522
482
|
"""
|
523
|
-
|
524
|
-
PositiveMask = x >= 0
|
525
|
-
NegativeMask = ~PositiveMask
|
526
|
-
|
527
|
-
ExpXPositive = np.exp(-np.clip(x[PositiveMask], -709, None))
|
528
|
-
ExpXNegative = np.exp(np.clip(x[NegativeMask], None, 709))
|
529
|
-
|
530
|
-
SigmoidX = np.zeros_like(x, dtype=float)
|
531
|
-
SigmoidX[PositiveMask] = 1 / (1 + ExpXPositive)
|
532
|
-
SigmoidX[NegativeMask] = ExpXNegative / (1 + ExpXNegative)
|
533
|
-
|
534
|
-
return SigmoidX
|
483
|
+
return expit(x)
|
535
484
|
|
536
485
|
|
537
486
|
def Relu(
|
538
|
-
x #
|
487
|
+
x # num: Input data to be transformed using ReLU function.
|
539
488
|
):
|
540
489
|
"""
|
541
490
|
Applies the Rectified Linear Unit (ReLU) function to the input data.
|
542
491
|
|
543
492
|
Args:
|
544
|
-
|
493
|
+
(num): Input data to be transformed using ReLU function.
|
545
494
|
|
546
495
|
Returns:
|
547
|
-
|
496
|
+
(num): Transformed data after applying ReLU function.
|
548
497
|
"""
|
549
498
|
|
550
|
-
|
551
499
|
return np.maximum(0, x)
|
552
500
|
|
553
501
|
|
554
|
-
def
|
555
|
-
|
556
|
-
|
557
|
-
|
558
|
-
|
559
|
-
|
560
|
-
Normalizations, # str: Whether normalization will be performed ("y" or "n").
|
561
|
-
Activation, # str: Activation function list for the neural network.
|
562
|
-
W # list[list[num]]: Weight matrix of the neural network.
|
502
|
+
def evaluate(
|
503
|
+
x_test, # list[num]: Test input data.
|
504
|
+
y_test, # list[num]: Test labels.
|
505
|
+
show_metrices, # show_metrices (bool): (True or False)
|
506
|
+
W, # list[num]: Weight matrix list of the neural network.
|
507
|
+
activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
|
563
508
|
) -> tuple:
|
564
|
-
infoTestModel =
|
509
|
+
infoTestModel = """
|
565
510
|
Tests the neural network model with the given test data.
|
566
511
|
|
567
512
|
Args:
|
568
|
-
|
569
|
-
|
570
|
-
|
571
|
-
|
572
|
-
|
573
|
-
Normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
|
574
|
-
Activation (str): Activation function for the neural network.
|
575
|
-
W (list[list[num]]): Weight matrix of the neural network.
|
513
|
+
x_test (list[num]): Test input data.
|
514
|
+
y_test (list[num]): Test labels.
|
515
|
+
show_metrices (bool): (True or False)
|
516
|
+
W (list[num]): Weight matrix list of the neural network.
|
517
|
+
activation_potential (float or None): Threshold value for comparison. (optional)
|
576
518
|
|
577
519
|
Returns:
|
578
520
|
tuple: A tuple containing the predicted labels and the accuracy of the model.
|
579
521
|
"""
|
580
522
|
|
523
|
+
layers = ['fex', 'cat']
|
581
524
|
|
582
525
|
try:
|
583
|
-
Wc = [0] * len(W)
|
526
|
+
Wc = [0] * len(W) # Wc = Weight copy
|
584
527
|
true = 0
|
585
|
-
|
528
|
+
y_preds = [-1] * len(y_test)
|
529
|
+
acc_list = []
|
530
|
+
|
586
531
|
for i, w in enumerate(W):
|
587
532
|
Wc[i] = np.copy(w)
|
588
|
-
print('\rCopying weights.....',i+1,'/',len(W),end
|
589
|
-
|
533
|
+
print('\rCopying weights.....', i+1, '/', len(W), end="")
|
534
|
+
|
590
535
|
print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
|
591
|
-
|
592
|
-
for inpIndex,Input in enumerate(
|
536
|
+
start_time = time.time()
|
537
|
+
for inpIndex, Input in enumerate(x_test):
|
593
538
|
Input = np.array(Input)
|
594
539
|
Input = Input.ravel()
|
595
|
-
|
596
|
-
|
597
|
-
|
598
|
-
for index, Layer in enumerate(
|
599
|
-
|
600
|
-
|
601
|
-
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
for i, w in enumerate(Wc):
|
613
|
-
W[i] = np.copy(w)
|
614
|
-
RealOutput = np.argmax(TestLabels[inpIndex])
|
615
|
-
PredictedOutput = np.argmax(NeuralLayer)
|
616
|
-
if RealOutput == PredictedOutput:
|
540
|
+
uni_start_time = time.time()
|
541
|
+
neural_layer = Input
|
542
|
+
|
543
|
+
for index, Layer in enumerate(layers):
|
544
|
+
|
545
|
+
neural_layer = normalization(neural_layer)
|
546
|
+
|
547
|
+
if Layer == 'fex':
|
548
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
|
549
|
+
elif Layer == 'cat':
|
550
|
+
neural_layer = np.dot(W[index], neural_layer)
|
551
|
+
|
552
|
+
for i, w in enumerate(Wc):
|
553
|
+
W[i] = np.copy(w)
|
554
|
+
RealOutput = np.argmax(y_test[inpIndex])
|
555
|
+
PredictedOutput = np.argmax(neural_layer)
|
556
|
+
if RealOutput == PredictedOutput:
|
617
557
|
true += 1
|
618
|
-
|
619
|
-
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
629
|
-
print('\rest......(
|
630
|
-
print('\rTest
|
631
|
-
|
632
|
-
elif
|
633
|
-
print('\rest......(
|
634
|
-
print('\rTest
|
635
|
-
|
558
|
+
acc = true / len(y_test)
|
559
|
+
if show_metrices == True:
|
560
|
+
acc_list.append(acc)
|
561
|
+
y_preds[inpIndex] = PredictedOutput
|
562
|
+
|
563
|
+
uni_end_time = time.time()
|
564
|
+
|
565
|
+
calculating_est = round(
|
566
|
+
(uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
|
567
|
+
|
568
|
+
if calculating_est < 60:
|
569
|
+
print('\rest......(sec):', calculating_est, '\n', end="")
|
570
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
571
|
+
|
572
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
573
|
+
print('\rest......(min):', calculating_est/60, '\n', end="")
|
574
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
575
|
+
|
576
|
+
elif calculating_est > 3600:
|
577
|
+
print('\rest......(h):', calculating_est/3600, '\n', end="")
|
578
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
579
|
+
if show_metrices == True:
|
580
|
+
plot_evaluate(y_test, y_preds, acc_list)
|
581
|
+
|
636
582
|
EndTime = time.time()
|
637
583
|
for i, w in enumerate(Wc):
|
638
584
|
W[i] = np.copy(w)
|
639
|
-
|
640
|
-
|
641
|
-
|
585
|
+
|
586
|
+
calculating_est = round(EndTime - start_time, 2)
|
587
|
+
|
642
588
|
print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
|
643
|
-
|
644
|
-
if CalculatingEst < 60:
|
645
|
-
print('Total testing time(sec): ',CalculatingEst)
|
646
|
-
|
647
|
-
elif CalculatingEst > 60 and CalculatingEst < 3600:
|
648
|
-
print('Total testing time(min): ',CalculatingEst/60)
|
649
|
-
|
650
|
-
elif CalculatingEst > 3600:
|
651
|
-
print('Total testing time(h): ',CalculatingEst/3600)
|
652
|
-
|
653
|
-
if Acc >= 0.8:
|
654
|
-
print(Fore.GREEN + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
655
|
-
|
656
|
-
elif Acc < 0.8 and Acc > 0.6:
|
657
|
-
print(Fore.MAGENTA + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
658
|
-
|
659
|
-
elif Acc <= 0.6:
|
660
|
-
print(Fore.RED+ '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
661
589
|
|
590
|
+
if calculating_est < 60:
|
591
|
+
print('Total testing time(sec): ', calculating_est)
|
592
|
+
|
593
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
594
|
+
print('Total testing time(min): ', calculating_est/60)
|
595
|
+
|
596
|
+
elif calculating_est > 3600:
|
597
|
+
print('Total testing time(h): ', calculating_est/3600)
|
598
|
+
|
599
|
+
if acc >= 0.8:
|
600
|
+
print(Fore.GREEN + '\nTotal Test accuracy: ',
|
601
|
+
acc, '\n' + Style.RESET_ALL)
|
602
|
+
|
603
|
+
elif acc < 0.8 and acc > 0.6:
|
604
|
+
print(Fore.MAGENTA + '\nTotal Test accuracy: ',
|
605
|
+
acc, '\n' + Style.RESET_ALL)
|
606
|
+
|
607
|
+
elif acc <= 0.6:
|
608
|
+
print(Fore.RED + '\nTotal Test accuracy: ', acc, '\n' + Style.RESET_ALL)
|
609
|
+
|
662
610
|
except:
|
611
|
+
|
612
|
+
print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
|
613
|
+
infoTestModel + Style.RESET_ALL)
|
614
|
+
return 'e'
|
615
|
+
|
616
|
+
return W, y_preds, acc
|
617
|
+
|
618
|
+
|
619
|
+
def multiple_evaluate(
|
620
|
+
x_test, # list[num]: Test input data.
|
621
|
+
y_test, # list[num]: Test labels.
|
622
|
+
show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
|
623
|
+
MW, # list[list[num]]: Weight matrix of the neural network.
|
624
|
+
activation_potential=None # (float or None): Threshold value for comparison. (optional)
|
625
|
+
) -> tuple:
|
626
|
+
infoTestModel = """
|
627
|
+
Tests the neural network model with the given test data.
|
628
|
+
|
629
|
+
Args:
|
630
|
+
x_test (list[num]): Test input data.
|
631
|
+
y_test (list[num]): Test labels.
|
632
|
+
show_metrices (bool): (True or False)
|
633
|
+
MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
|
634
|
+
|
635
|
+
Returns:
|
636
|
+
tuple: A tuple containing the predicted labels and the accuracy of the model.
|
637
|
+
"""
|
638
|
+
|
639
|
+
layers = ['fex', 'cat']
|
640
|
+
|
641
|
+
try:
|
642
|
+
y_preds = [-1] * len(y_test)
|
643
|
+
acc_list = []
|
644
|
+
print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
|
645
|
+
start_time = time.time()
|
646
|
+
true = 0
|
647
|
+
for inpIndex, Input in enumerate(x_test):
|
648
|
+
|
649
|
+
output_layer = 0
|
650
|
+
|
651
|
+
for m, Model in enumerate(MW):
|
652
|
+
|
653
|
+
W = Model
|
654
|
+
|
655
|
+
Wc = [0] * len(W) # Wc = weight copy
|
656
|
+
|
657
|
+
y_preds = [None] * len(y_test)
|
658
|
+
for i, w in enumerate(W):
|
659
|
+
Wc[i] = np.copy(w)
|
660
|
+
|
661
|
+
Input = np.array(Input)
|
662
|
+
Input = Input.ravel()
|
663
|
+
uni_start_time = time.time()
|
664
|
+
neural_layer = Input
|
665
|
+
|
666
|
+
for index, Layer in enumerate(layers):
|
667
|
+
|
668
|
+
neural_layer = normalization(neural_layer)
|
669
|
+
|
670
|
+
if Layer == 'fex':
|
671
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
|
672
|
+
elif Layer == 'cat':
|
673
|
+
neural_layer = np.dot(W[index], neural_layer)
|
674
|
+
|
675
|
+
output_layer += neural_layer
|
676
|
+
|
677
|
+
for i, w in enumerate(Wc):
|
678
|
+
W[i] = np.copy(w)
|
679
|
+
for i, w in enumerate(Wc):
|
680
|
+
W[i] = np.copy(w)
|
681
|
+
RealOutput = np.argmax(y_test[inpIndex])
|
682
|
+
PredictedOutput = np.argmax(output_layer)
|
683
|
+
if RealOutput == PredictedOutput:
|
684
|
+
true += 1
|
685
|
+
acc = true / len(y_test)
|
686
|
+
if show_metrices == True:
|
687
|
+
acc_list.append(acc)
|
688
|
+
y_preds[inpIndex] = PredictedOutput
|
689
|
+
|
690
|
+
|
691
|
+
uni_end_time = time.time()
|
692
|
+
|
693
|
+
calculating_est = round(
|
694
|
+
(uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
|
695
|
+
|
696
|
+
if calculating_est < 60:
|
697
|
+
print('\rest......(sec):', calculating_est, '\n', end="")
|
698
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
699
|
+
|
700
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
701
|
+
print('\rest......(min):', calculating_est/60, '\n', end="")
|
702
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
703
|
+
|
704
|
+
elif calculating_est > 3600:
|
705
|
+
print('\rest......(h):', calculating_est/3600, '\n', end="")
|
706
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
707
|
+
if show_metrices == True:
|
708
|
+
plot_evaluate(y_test, y_preds, acc_list)
|
663
709
|
|
664
|
-
|
710
|
+
EndTime = time.time()
|
711
|
+
for i, w in enumerate(Wc):
|
712
|
+
W[i] = np.copy(w)
|
713
|
+
|
714
|
+
calculating_est = round(EndTime - start_time, 2)
|
715
|
+
|
716
|
+
print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
|
717
|
+
|
718
|
+
if calculating_est < 60:
|
719
|
+
print('Total testing time(sec): ', calculating_est)
|
720
|
+
|
721
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
722
|
+
print('Total testing time(min): ', calculating_est/60)
|
723
|
+
|
724
|
+
elif calculating_est > 3600:
|
725
|
+
print('Total testing time(h): ', calculating_est/3600)
|
726
|
+
|
727
|
+
if acc >= 0.8:
|
728
|
+
print(Fore.GREEN + '\nTotal Test accuracy: ',
|
729
|
+
acc, '\n' + Style.RESET_ALL)
|
730
|
+
|
731
|
+
elif acc < 0.8 and acc > 0.6:
|
732
|
+
print(Fore.MAGENTA + '\nTotal Test accuracy: ',
|
733
|
+
acc, '\n' + Style.RESET_ALL)
|
734
|
+
|
735
|
+
elif acc <= 0.6:
|
736
|
+
print(Fore.RED + '\nTotal Test accuracy: ',
|
737
|
+
acc, '\n' + Style.RESET_ALL)
|
738
|
+
|
739
|
+
except:
|
740
|
+
|
741
|
+
print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
|
665
742
|
return 'e'
|
666
|
-
|
667
|
-
return
|
668
|
-
|
669
|
-
|
670
|
-
|
671
|
-
|
672
|
-
|
673
|
-
|
674
|
-
|
675
|
-
|
676
|
-
|
677
|
-
|
678
|
-
|
679
|
-
|
680
|
-
|
681
|
-
|
682
|
-
|
683
|
-
|
684
|
-
|
685
|
-
infoSavePLAN = """
|
686
|
-
Function to save a deep learning model.
|
743
|
+
|
744
|
+
return W, y_preds, acc
|
745
|
+
|
746
|
+
|
747
|
+
def save_model(model_name,
|
748
|
+
model_type,
|
749
|
+
class_count,
|
750
|
+
test_acc,
|
751
|
+
weights_type,
|
752
|
+
weights_format,
|
753
|
+
model_path,
|
754
|
+
scaler_params,
|
755
|
+
W,
|
756
|
+
activation_potential=None
|
757
|
+
):
|
758
|
+
|
759
|
+
infosave_model = """
|
760
|
+
Function to save a pruning learning model.
|
687
761
|
|
688
762
|
Arguments:
|
689
|
-
|
690
|
-
|
691
|
-
|
692
|
-
|
693
|
-
|
694
|
-
ThresholdValues (list): List containing threshold values.
|
695
|
-
DoNormalization (str): is that normalized data ? 'y' or 'n'.
|
696
|
-
Activations (list): List containing activation functions for each layer.
|
697
|
-
TestAcc (float): Test accuracy of the model.
|
698
|
-
LogType (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
|
699
|
-
WeightsType (str): Type of weights to save (options: 'txt', 'npy', 'mat').
|
763
|
+
model_name (str): Name of the model.
|
764
|
+
model_type (str): Type of the model.(options: PLAN)
|
765
|
+
class_count (int): Number of classes.
|
766
|
+
test_acc (float): Test accuracy of the model.
|
767
|
+
weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
|
700
768
|
WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
|
701
|
-
|
769
|
+
model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
|
770
|
+
scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
|
702
771
|
W: Weights of the model.
|
772
|
+
activation_potential (float or None): Threshold value for comparison. (optional)
|
703
773
|
|
704
774
|
Returns:
|
705
775
|
str: Message indicating if the model was saved successfully or encountered an error.
|
706
776
|
"""
|
707
|
-
|
777
|
+
|
708
778
|
# Operations to be performed by the function will be written here
|
709
779
|
pass
|
710
780
|
|
711
|
-
|
712
|
-
|
713
|
-
|
714
|
-
|
715
|
-
|
716
|
-
print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
|
781
|
+
layers = ['fex', 'cat']
|
782
|
+
|
783
|
+
if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
|
784
|
+
print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
|
785
|
+
infosave_model + Style.RESET_ALL)
|
717
786
|
return 'e'
|
718
|
-
|
719
|
-
if
|
720
|
-
print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from:
|
787
|
+
|
788
|
+
if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
|
789
|
+
print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
|
790
|
+
infosave_model + Style.RESET_ALL)
|
721
791
|
return 'e'
|
722
|
-
|
792
|
+
|
723
793
|
NeuronCount = 0
|
724
794
|
SynapseCount = 0
|
795
|
+
|
725
796
|
try:
|
726
797
|
for w in W:
|
727
798
|
NeuronCount += np.shape(w)[0]
|
728
799
|
SynapseCount += np.shape(w)[0] * np.shape(w)[1]
|
729
800
|
except:
|
730
|
-
|
731
|
-
print(Fore.RED + "ERROR: Weight matrices has a problem from:
|
801
|
+
|
802
|
+
print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
|
803
|
+
infosave_model + Style.RESET_ALL)
|
732
804
|
return 'e'
|
733
805
|
import pandas as pd
|
734
806
|
from datetime import datetime
|
735
807
|
from scipy import io
|
736
|
-
|
737
|
-
data = {'MODEL NAME':
|
738
|
-
'MODEL TYPE':
|
739
|
-
'LAYERS':
|
740
|
-
'LAYER COUNT': len(
|
741
|
-
'CLASS COUNT':
|
742
|
-
'THRESHOLD SIGNS': ThresholdSigns,
|
743
|
-
'THRESHOLD VALUES': ThresholdValues,
|
744
|
-
'NORMALIZATION': Normalizations,
|
745
|
-
'ACTIVATIONS': Activations,
|
808
|
+
|
809
|
+
data = {'MODEL NAME': model_name,
|
810
|
+
'MODEL TYPE': model_type,
|
811
|
+
'LAYERS': layers,
|
812
|
+
'LAYER COUNT': len(layers),
|
813
|
+
'CLASS COUNT': class_count,
|
746
814
|
'NEURON COUNT': NeuronCount,
|
747
815
|
'SYNAPSE COUNT': SynapseCount,
|
748
|
-
'TEST ACCURACY':
|
816
|
+
'TEST ACCURACY': test_acc,
|
749
817
|
'SAVE DATE': datetime.now(),
|
750
|
-
'WEIGHTS TYPE':
|
751
|
-
'WEIGHTS FORMAT':
|
752
|
-
'
|
818
|
+
'WEIGHTS TYPE': weights_type,
|
819
|
+
'WEIGHTS FORMAT': weights_format,
|
820
|
+
'MODEL PATH': model_path,
|
821
|
+
'STANDARD SCALER': scaler_params,
|
822
|
+
'ACTIVATION POTENTIAL': activation_potential
|
753
823
|
}
|
754
824
|
try:
|
755
|
-
|
825
|
+
|
756
826
|
df = pd.DataFrame(data)
|
757
|
-
|
758
|
-
|
759
|
-
|
760
|
-
df.to_csv(SavePath + ModelName + '.csv', sep='\t', index=False)
|
761
|
-
|
762
|
-
elif LogType == 'txt':
|
763
|
-
|
764
|
-
df.to_csv(SavePath + ModelName + '.txt', sep='\t', index=False)
|
765
|
-
|
766
|
-
elif LogType == 'hdf5':
|
767
|
-
|
768
|
-
df.to_hdf(SavePath + ModelName + '.h5', key='data', mode='w')
|
769
|
-
|
827
|
+
|
828
|
+
df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
|
829
|
+
|
770
830
|
except:
|
771
|
-
|
772
|
-
print(Fore.RED + "ERROR: Model log not saved. Check the log parameters from:
|
831
|
+
|
832
|
+
print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
|
833
|
+
infosave_model + Style.RESET_ALL)
|
773
834
|
return 'e'
|
774
835
|
try:
|
775
|
-
|
776
|
-
if
|
777
|
-
|
836
|
+
|
837
|
+
if weights_type == 'txt' and weights_format == 'd':
|
838
|
+
|
778
839
|
for i, w in enumerate(W):
|
779
|
-
np.savetxt(
|
780
|
-
|
781
|
-
|
782
|
-
|
840
|
+
np.savetxt(model_path + model_name +
|
841
|
+
str(i+1) + 'w.txt', w, fmt='%d')
|
842
|
+
|
843
|
+
if weights_type == 'txt' and weights_format == 'f':
|
844
|
+
|
783
845
|
for i, w in enumerate(W):
|
784
|
-
|
785
|
-
|
786
|
-
|
787
|
-
|
846
|
+
np.savetxt(model_path + model_name +
|
847
|
+
str(i+1) + 'w.txt', w, fmt='%f')
|
848
|
+
|
849
|
+
if weights_type == 'txt' and weights_format == 'raw':
|
850
|
+
|
788
851
|
for i, w in enumerate(W):
|
789
|
-
np.savetxt(
|
790
|
-
|
791
|
-
|
852
|
+
np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
|
853
|
+
|
792
854
|
###
|
793
|
-
|
794
|
-
|
795
|
-
|
796
|
-
|
855
|
+
|
856
|
+
if weights_type == 'npy' and weights_format == 'd':
|
857
|
+
|
797
858
|
for i, w in enumerate(W):
|
798
|
-
np.save(
|
799
|
-
|
800
|
-
|
801
|
-
|
859
|
+
np.save(model_path + model_name +
|
860
|
+
str(i+1) + 'w.npy', w.astype(int))
|
861
|
+
|
862
|
+
if weights_type == 'npy' and weights_format == 'f':
|
863
|
+
|
802
864
|
for i, w in enumerate(W):
|
803
|
-
|
804
|
-
|
805
|
-
|
806
|
-
|
865
|
+
np.save(model_path + model_name + str(i+1) +
|
866
|
+
'w.npy', w, w.astype(float))
|
867
|
+
|
868
|
+
if weights_type == 'npy' and weights_format == 'raw':
|
869
|
+
|
807
870
|
for i, w in enumerate(W):
|
808
|
-
np.save(
|
809
|
-
|
810
|
-
|
871
|
+
np.save(model_path + model_name + str(i+1) + 'w.npy', w)
|
872
|
+
|
811
873
|
###
|
812
|
-
|
813
|
-
|
814
|
-
|
815
|
-
|
874
|
+
|
875
|
+
if weights_type == 'mat' and weights_format == 'd':
|
876
|
+
|
816
877
|
for i, w in enumerate(W):
|
817
878
|
w = {'w': w.astype(int)}
|
818
|
-
io.savemat(
|
819
|
-
|
820
|
-
if
|
821
|
-
|
879
|
+
io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
|
880
|
+
|
881
|
+
if weights_type == 'mat' and weights_format == 'f':
|
882
|
+
|
822
883
|
for i, w in enumerate(W):
|
823
884
|
w = {'w': w.astype(float)}
|
824
|
-
io.savemat(
|
825
|
-
|
826
|
-
if
|
827
|
-
|
885
|
+
io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
|
886
|
+
|
887
|
+
if weights_type == 'mat' and weights_format == 'raw':
|
888
|
+
|
828
889
|
for i, w in enumerate(W):
|
829
890
|
w = {'w': w}
|
830
|
-
io.savemat(
|
831
|
-
|
891
|
+
io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
|
892
|
+
|
832
893
|
except:
|
833
|
-
|
834
|
-
print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from:
|
894
|
+
|
895
|
+
print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
|
835
896
|
return 'e'
|
836
897
|
print(df)
|
837
898
|
message = (
|
838
899
|
Fore.GREEN + "Model Saved Successfully\n" +
|
839
|
-
Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
|
900
|
+
Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
|
840
901
|
Style.RESET_ALL
|
841
|
-
|
842
|
-
|
902
|
+
)
|
903
|
+
|
843
904
|
return print(message)
|
844
905
|
|
845
906
|
|
846
|
-
def
|
847
|
-
|
848
|
-
|
849
|
-
|
850
|
-
|
851
|
-
Function to load a deep learning model.
|
907
|
+
def load_model(model_name,
|
908
|
+
model_path,
|
909
|
+
):
|
910
|
+
infoload_model = """
|
911
|
+
Function to load a pruning learning model.
|
852
912
|
|
853
913
|
Arguments:
|
854
|
-
|
855
|
-
|
856
|
-
LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
|
914
|
+
model_name (str): Name of the model.
|
915
|
+
model_path (str): Path where the model is saved.
|
857
916
|
|
858
917
|
Returns:
|
859
|
-
lists: W(list[num]),
|
918
|
+
lists: W(list[num]), activation_potential, DataFrame of the model
|
860
919
|
"""
|
861
|
-
|
920
|
+
pass
|
921
|
+
|
922
|
+
import scipy.io as sio
|
862
923
|
|
924
|
+
try:
|
925
|
+
|
926
|
+
df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
|
927
|
+
|
928
|
+
except:
|
929
|
+
|
930
|
+
print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
|
931
|
+
infoload_model + Style.RESET_ALL)
|
932
|
+
|
933
|
+
model_name = str(df['MODEL NAME'].iloc[0])
|
934
|
+
layer_count = int(df['LAYER COUNT'].iloc[0])
|
935
|
+
WeightType = str(df['WEIGHTS TYPE'].iloc[0])
|
936
|
+
|
937
|
+
W = [0] * layer_count
|
938
|
+
|
939
|
+
if WeightType == 'txt':
|
940
|
+
for i in range(layer_count):
|
941
|
+
W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
|
942
|
+
elif WeightType == 'npy':
|
943
|
+
for i in range(layer_count):
|
944
|
+
W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
|
945
|
+
elif WeightType == 'mat':
|
946
|
+
for i in range(layer_count):
|
947
|
+
W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
|
948
|
+
else:
|
949
|
+
raise ValueError(
|
950
|
+
Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
|
951
|
+
print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
|
952
|
+
return W, df
|
953
|
+
|
954
|
+
|
955
|
+
def predict_model_ssd(Input, model_name, model_path):
|
956
|
+
|
957
|
+
infopredict_model_ssd = """
|
958
|
+
Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
|
959
|
+
|
960
|
+
Arguments:
|
961
|
+
Input (list or ndarray): Input data for the model (single vector or single matrix).
|
962
|
+
model_name (str): Name of the model.
|
963
|
+
Returns:
|
964
|
+
ndarray: Output from the model.
|
965
|
+
"""
|
966
|
+
W, df = load_model(model_name, model_path)
|
863
967
|
|
864
|
-
|
865
|
-
|
866
|
-
|
867
|
-
|
868
|
-
|
869
|
-
if LogType == 'csv':
|
870
|
-
df = pd.read_csv(LoadPath + ModelName + '.' + LogType)
|
968
|
+
activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
|
969
|
+
|
970
|
+
if activation_potential != 'nan':
|
971
|
+
|
972
|
+
activation_potential = float(activation_potential)
|
871
973
|
|
974
|
+
else:
|
975
|
+
|
976
|
+
activation_potential = None
|
872
977
|
|
873
|
-
|
874
|
-
|
978
|
+
try:
|
979
|
+
|
980
|
+
scaler_params = df['STANDARD SCALER'].tolist()
|
981
|
+
|
982
|
+
|
983
|
+
scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
|
984
|
+
|
985
|
+
Input = standard_scaler(None, Input, scaler_params)
|
875
986
|
|
987
|
+
except:
|
876
988
|
|
877
|
-
|
878
|
-
|
879
|
-
except:
|
880
|
-
print(Fore.RED + "ERROR: Model Path error. Accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)
|
881
|
-
|
882
|
-
ModelName = str(df['MODEL NAME'].iloc[0])
|
883
|
-
Layers = df['LAYERS'].tolist()
|
884
|
-
LayerCount = int(df['LAYER COUNT'].iloc[0])
|
885
|
-
ClassCount = int(df['CLASS COUNT'].iloc[0])
|
886
|
-
ThresholdSigns = df['THRESHOLD SIGNS'].tolist()
|
887
|
-
ThresholdValues = df['THRESHOLD VALUES'].tolist()
|
888
|
-
Normalization = df['NORMALIZATION'].tolist()
|
889
|
-
Activations = df['ACTIVATIONS'].tolist()
|
890
|
-
NeuronCount = int(df['NEURON COUNT'].iloc[0])
|
891
|
-
SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
|
892
|
-
TestAcc = int(df['TEST ACCURACY'].iloc[0])
|
893
|
-
ModelType = str(df['MODEL TYPE'].iloc[0])
|
894
|
-
WeightType = str(df['WEIGHTS TYPE'].iloc[0])
|
895
|
-
WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
|
896
|
-
SavePath = str(df['SAVE PATH'].iloc[0])
|
897
|
-
|
898
|
-
W = [0] * LayerCount
|
899
|
-
|
900
|
-
if WeightType == 'txt':
|
901
|
-
for i in range(LayerCount):
|
902
|
-
W[i] = np.loadtxt(LoadPath + ModelName + str(i+1) + 'w.txt')
|
903
|
-
elif WeightType == 'npy':
|
904
|
-
for i in range(LayerCount):
|
905
|
-
W[i] = np.load(LoadPath + ModelName + str(i+1) + 'w.npy')
|
906
|
-
elif WeightType == 'mat':
|
907
|
-
for i in range(LayerCount):
|
908
|
-
W[i] = sio.loadmat(LoadPath + ModelName + str(i+1) + 'w.mat')
|
909
|
-
else:
|
910
|
-
raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
|
911
|
-
print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
|
912
|
-
return W,Layers,ThresholdSigns,ThresholdValues,Normalization,Activations,df
|
913
|
-
|
914
|
-
def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
|
915
|
-
infoPredictFromDİscPLAN = """
|
916
|
-
Function to make a prediction using a divided pruning deep learning neural network (PLAN).
|
989
|
+
non_scaled = True
|
990
|
+
|
917
991
|
|
918
|
-
|
919
|
-
Input (list or ndarray): Input data for the model (single vector or single matrix).
|
920
|
-
ModelName (str): Name of the model.
|
921
|
-
ModelPath (str): Path where the model is saved.
|
922
|
-
LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
|
992
|
+
layers = ['fex', 'cat']
|
923
993
|
|
924
|
-
Returns:
|
925
|
-
ndarray: Output from the model.
|
926
|
-
"""
|
927
|
-
W,Layers,ThresholdSigns,ThresholdValues,Normalization,Activations = LoadPLAN(ModelName,ModelPath,
|
928
|
-
LogType)[0:6]
|
929
994
|
Wc = [0] * len(W)
|
930
995
|
for i, w in enumerate(W):
|
931
996
|
Wc[i] = np.copy(w)
|
932
997
|
try:
|
933
|
-
|
934
|
-
|
935
|
-
|
936
|
-
for index, Layer in enumerate(
|
937
|
-
|
938
|
-
|
939
|
-
|
940
|
-
|
941
|
-
|
942
|
-
|
943
|
-
|
944
|
-
NeuralLayer = Softmax(NeuralLayer)
|
945
|
-
|
946
|
-
if Layers[index] == 'fex':
|
947
|
-
NeuralLayer,useless = Fex(NeuralLayer, W[index],
|
948
|
-
ThresholdSigns[index],
|
949
|
-
ThresholdValues[index])
|
950
|
-
if Layers[index] == 'cat':
|
951
|
-
NeuralLayer,useless = Cat(NeuralLayer, W[index],
|
952
|
-
ThresholdSigns[index],
|
953
|
-
ThresholdValues[index],
|
954
|
-
0)
|
998
|
+
neural_layer = Input
|
999
|
+
neural_layer = np.array(neural_layer)
|
1000
|
+
neural_layer = neural_layer.ravel()
|
1001
|
+
for index, Layer in enumerate(layers):
|
1002
|
+
|
1003
|
+
neural_layer = normalization(neural_layer)
|
1004
|
+
|
1005
|
+
if Layer == 'fex':
|
1006
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
|
1007
|
+
elif Layer == 'cat':
|
1008
|
+
neural_layer = np.dot(W[index], neural_layer)
|
955
1009
|
except:
|
956
|
-
|
957
|
-
|
1010
|
+
print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
|
1011
|
+
infopredict_model_ssd + Style.RESET_ALL)
|
1012
|
+
return 'e'
|
958
1013
|
for i, w in enumerate(Wc):
|
959
1014
|
W[i] = np.copy(w)
|
960
|
-
return
|
1015
|
+
return neural_layer


-def
+def predict_model_ram(Input, scaler_params, W, activation_potential=None):
+
+    infopredict_model_ram = """
+    Function to make a prediction using a divided pruning learning artificial neural network (PLAN)
     from weights and parameters stored in memory.

     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
-    ThresholdSigns (list): Threshold signs.
-    ThresholdValues (list): Threshold values.
-    DoNormalization (str): Whether to normalize ('y' or 'n').
-    Activations (list): Activation functions for each layer.
+    scaler_params (list): Standard scaler parameters [mean, std]; pass None if no standard scaling was applied.
     W (list of ndarrays): Weights of the model.
+    activation_potential (float or None): Threshold value for comparison. (optional)

     Returns:
     ndarray: Output from the model.
     """

+    if scaler_params is not None:
+
+        Input = standard_scaler(None, Input, scaler_params)
+
+    layers = ['fex', 'cat']
+
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
     try:
-        for index, Layer in enumerate(
-
-            if Layers[index] == 'fex':
-                NeuralLayer,useless = Fex(NeuralLayer, W[index],
-                                          ThresholdSigns[index],
-                                          ThresholdValues[index])
-            if Layers[index] == 'cat':
-                NeuralLayer,useless = Cat(NeuralLayer, W[index],
-                                          ThresholdSigns[index],
-                                          ThresholdValues[index],0)
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
+
+            neural_layer = normalization(neural_layer)
+
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+            elif Layer == 'cat':
+                neural_layer = np.dot(W[index], neural_layer)
+
     except:
-        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
+              infopredict_model_ram + Style.RESET_ALL)
         return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
-    return
-
+    return neural_layer
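A minimal smoke-test sketch for the new in-memory predictor; the names x_test, W, and scaler_params are illustrative assumptions here, since the fit/standard_scaler calls that produce them sit outside this hunk:

    # Hypothetical usage of predict_model_ram (names are illustrative).
    output = predict_model_ram(x_test[0], scaler_params, W)
    if not isinstance(output, str):          # the function returns the string 'e' on failure
        predicted_class = np.argmax(output)  # index of the strongest output neuron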
+

+def auto_balancer(x_train, y_train):

-    infoAutoBalancer = """
+    infoauto_balancer = """
     Function to balance the training data across different classes.

     Arguments:
-    ClassCount (int): Number of classes.
+    x_train (list): Input data for training.
+    y_train (list): Labels corresponding to the input data.

     Returns:
     tuple: A tuple containing balanced input data and labels.
     """
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
+            0] for i in range(class_count)}
+        classes = [len(ClassIndices[i]) for i in range(class_count)]
+
+        if len(set(classes)) == 1:
+            print(Fore.WHITE + "INFO: All training data are already balanced. from: auto_balancer" + Style.RESET_ALL)
+            return x_train, y_train
+
+        MinCount = min(classes)
+
         BalancedIndices = []
-        for i in range(
+        for i in range(class_count):
             if len(ClassIndices[i]) > MinCount:
-                SelectedIndices = np.random.choice(
+                SelectedIndices = np.random.choice(
+                    ClassIndices[i], MinCount, replace=False)
             else:
                 SelectedIndices = ClassIndices[i]
             BalancedIndices.extend(SelectedIndices)
+
+        BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+        BalancedLabels = [y_train[idx] for idx in BalancedIndices]
+
+        print(Fore.GREEN + "All training data successfully balanced from: " + str(len(x_train)
+              ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+    except:
+        print(Fore.RED + "ERROR: Inputs and labels must be the same length; check parameters" + infoauto_balancer)
+        return 'e'
+
+    return BalancedInputs, BalancedLabels
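A quick sketch of the undersampling behavior, under the assumption that y_train is one-hot and both inputs are NumPy arrays (x_train and y_train here are illustrative names):

    # Hypothetical example: three classes holding 50/80/120 one-hot samples
    # are randomly undersampled to 50 each (the minority-class count).
    x_bal, y_bal = auto_balancer(np.array(x_train), np.array(y_train))
    print(len(x_bal))   # expected: 3 * min(class sizes) = 150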
+

+def synthetic_augmentation(x_train, y_train):
+    """
+    Generates synthetic examples to balance classes with fewer examples.
+
+    Arguments:
+    x_train -- Input dataset (examples) - list format
+    y_train -- Class labels (one-hot encoded) - list format
+
+    Returns:
+    x_balanced -- Balanced input dataset (list format)
+    y_balanced -- Balanced class labels (one-hot encoded, list format)
+    """
+    x = x_train
+    y = y_train
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    # Calculate class distribution
+    class_distribution = {i: 0 for i in range(class_count)}
+    for label in y:
+        class_distribution[np.argmax(label)] += 1
+
+    max_class_count = max(class_distribution.values())
+
+    x_balanced = list(x)
+    y_balanced = list(y)
+
+    for class_label in range(class_count):
+        class_indices = [i for i, label in enumerate(
+            y) if np.argmax(label) == class_label]
+        num_samples = len(class_indices)
+
+        if num_samples < max_class_count:
+            while num_samples < max_class_count:
+
+                random_indices = np.random.choice(
+                    class_indices, 2, replace=False)
+                sample1 = x[random_indices[0]]
+                sample2 = x[random_indices[1]]
+
+                synthetic_sample = sample1 + \
+                    (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
+                x_balanced.append(synthetic_sample.tolist())
+                y_balanced.append(y[class_indices[0]])
+
+                num_samples += 1
+
+    return np.array(x_balanced), np.array(y_balanced)
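The synthetic sample is a random point on the segment between two same-class examples, s = s1 + (s2 - s1) * u with u ~ U(0, 1). A self-contained check of that interpolation rule (the values below are illustrative, not from the package):

    # Illustrative check of the interpolation rule used above.
    s1, s2 = np.array([0.0, 0.0]), np.array([1.0, 2.0])
    u = np.random.rand()
    s = s1 + (s2 - s1) * u   # always lies on the segment between s1 and s2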
+

+def standard_scaler(x_train, x_test, scaler_params=None):
+    info_standard_scaler = """
+    Standardizes training and test datasets. x_test may be None.
+
+    Args:
+        x_train: numpy.ndarray
+            Training data
+        x_test: numpy.ndarray
+            Test data
+
+    Returns:
+        list:
+            Scaler parameters: mean and std
+        tuple
+            Standardized training and test datasets
+    """
+    try:
+
+        if scaler_params is None and x_test is not None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+
+            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled, test_data_scaled

+        if scaler_params is None and x_test is None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+
+            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled
+
+        if scaler_params is not None:
+
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            return test_data_scaled
+
+    except:
+        print(
+            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+

+def encode_one_hot(y_train, y_test):
+    info_one_hot_encode = """
+    Performs one-hot encoding on y_train and y_test data.
+
+    Args:
+        y_train (numpy.ndarray): Training label data.
+        y_test (numpy.ndarray): Test label data.
+
+    Returns:
+        tuple: One-hot encoded y_train and y_test data.
+    """
+    try:
+        classes = np.unique(y_train)
+        class_count = len(classes)
+
+        class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+        y_train_encoded = np.zeros((y_train.shape[0], class_count))
+        for i, label in enumerate(y_train):
+            y_train_encoded[i, class_to_index[label]] = 1
+
+        y_test_encoded = np.zeros((y_test.shape[0], class_count))
+        for i, label in enumerate(y_test):
+            y_test_encoded[i, class_to_index[label]] = 1
+    except:
+        print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: encode_one_hot' + info_one_hot_encode)
+
+    return y_train_encoded, y_test_encoded
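An illustrative round trip with small integer-coded labels (the arrays are examples, not package data):

    y_tr = np.array([0, 2, 1, 2])
    y_te = np.array([1, 0])
    y_tr_oh, y_te_oh = encode_one_hot(y_tr, y_te)
    # y_tr_oh[1] -> [0., 0., 1.]  (label 2 of the 3 classes found in y_train)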
+

+def split(X, y, test_size, random_state):
+    """
+    Splits the given X (features) and y (labels) data into training and testing subsets.
+
+    Args:
+        X (numpy.ndarray): Features data.
+        y (numpy.ndarray): Labels data.
+        test_size (float or int): Proportion or number of samples for the test subset.
+        random_state (int or None): Seed for random state.
+
+    Returns:
+        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
+    """
+    # Size of the dataset
+    num_samples = X.shape[0]
+
+    if isinstance(test_size, float):
+        test_size = int(test_size * num_samples)
+    elif isinstance(test_size, int):
+        if test_size > num_samples:
+            raise ValueError(
+                "test_size cannot be larger than the number of samples.")
+    else:
+        raise ValueError("test_size should be float or int.")
+
+    if random_state is not None:
+        np.random.seed(random_state)
+
+    indices = np.arange(num_samples)
+    np.random.shuffle(indices)
+
+    test_indices = indices[:test_size]
+    train_indices = indices[test_size:]
+
+    x_train, x_test = X[train_indices], X[test_indices]
+    y_train, y_test = y[train_indices], y[test_indices]
+
+    return x_train, x_test, y_train, y_test
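A one-line usage sketch, assuming X and y are NumPy arrays of matching length (illustrative names):

    # 80/20 shuffled split with a fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)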
+

+def metrics(y_ts, test_preds, average='weighted'):
+    """
+    Calculates precision, recall and F1 score for a classification task.
+
+    Args:
+        y_ts (list or numpy.ndarray): True labels (one-hot encoded).
+        test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
+
+    Returns:
+        tuple: Precision, recall, F1 score.
+    """
+    y_test_d = decode_one_hot(y_ts)
+    y_test_d = np.array(y_test_d)
+    y_pred = np.array(test_preds)
+
+    if y_test_d.ndim > 1:
+        y_test_d = y_test_d.reshape(-1)
+    if y_pred.ndim > 1:
+        y_pred = y_pred.reshape(-1)
+
+    tp = {}
+    fp = {}
+    fn = {}
+
+    classes = np.unique(np.concatenate((y_test_d, y_pred)))
+
+    for c in classes:
+        tp[c] = 0
+        fp[c] = 0
+        fn[c] = 0
+
+    for c in classes:
+        for true, pred in zip(y_test_d, y_pred):
+            if true == c and pred == c:
+                tp[c] += 1
+            elif true != c and pred == c:
+                fp[c] += 1
+            elif true == c and pred != c:
+                fn[c] += 1
+
+    precision = {}
+    recall = {}
+    f1 = {}
+
+    for c in classes:
+        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
+
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+    return precision_val, recall_val, f1_val
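Note the asymmetric inputs: y_ts is one-hot (it is decoded internally) while test_preds are plain class indices. A small self-contained check (values are illustrative):

    y_ts = np.array([[1, 0], [0, 1], [0, 1]])      # one-hot ground truth
    test_preds = np.array([0, 1, 0])               # integer predictions
    prec, rec, f1 = metrics(y_ts, test_preds)                       # weighted (default)
    prec_m, rec_m, f1_m = metrics(y_ts, test_preds, average='macro')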
+

+def decode_one_hot(encoded_data):
+    """
+    Decodes one-hot encoded data to original categorical labels.
+
+    Args:
+        encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
+
+    Returns:
+        numpy.ndarray: Decoded categorical labels with shape (n_samples,).
+    """
+
+    decoded_labels = np.argmax(encoded_data, axis=1)
+
+    return decoded_labels
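A one-line sanity check (illustrative input):

    decode_one_hot(np.array([[0, 1, 0], [1, 0, 0]]))   # -> array([1, 0])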
+

+def roc_curve(y_true, y_score):
+    """
+    Compute Receiver Operating Characteristic (ROC) curve.
+
+    Parameters:
+    y_true : array, shape = [n_samples]
+        True binary labels in range {0, 1} or {-1, 1}.
+    y_score : array, shape = [n_samples]
+        Target scores, can either be probability estimates of the positive class,
+        confidence values, or non-thresholded measure of decisions (as returned
+        by decision_function on some classifiers).
+
+    Returns:
+    fpr : array, shape = [n]
+        Increasing false positive rates such that element i is the false positive rate
+        of predictions with score >= thresholds[i].
+    tpr : array, shape = [n]
+        Increasing true positive rates such that element i is the true positive rate
+        of predictions with score >= thresholds[i].
+    thresholds : array, shape = [n]
+        Decreasing thresholds on the decision function used to compute fpr and tpr.
+    """
+
+    y_true = np.asarray(y_true)
+    y_score = np.asarray(y_score)
+
+    if len(np.unique(y_true)) != 2:
+        raise ValueError("Only binary classification is supported.")
+
+    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
+    y_score = y_score[desc_score_indices]
+    y_true = y_true[desc_score_indices]
+
+    fpr = []
+    tpr = []
+    thresholds = []
+    n_pos = np.sum(y_true)
+    n_neg = len(y_true) - n_pos
+
+    tp = 0
+    fp = 0
+    prev_score = None
+
+    for i, score in enumerate(y_score):
+        if score != prev_score:
+            fpr.append(fp / n_neg)
+            tpr.append(tp / n_pos)
+            thresholds.append(score)
+            prev_score = score
+
+        if y_true[i] == 1:
+            tp += 1
+        else:
+            fp += 1
+
+    fpr.append(fp / n_neg)
+    tpr.append(tp / n_pos)
+    thresholds.append(score)
+
+    return np.array(fpr), np.array(tpr), np.array(thresholds)
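A small worked example (the arrays are illustrative; the trapezoidal AUC mirrors what plot_evaluate computes below):

    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, tpr, thr = roc_curve(y_true, y_score)
    auc = np.trapz(tpr, fpr)   # 0.75 for this input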
+

+def confusion_matrix(y_true, y_pred, class_count):
+    """
+    Computes confusion matrix.
+
+    Args:
+        y_true (numpy.ndarray): True class labels (1D array).
+        y_pred (numpy.ndarray): Predicted class labels (1D array).
+        class_count (int): Number of classes.
+
+    Returns:
+        numpy.ndarray: Confusion matrix of shape (class_count, class_count).
+    """
+    confusion = np.zeros((class_count, class_count), dtype=int)
+
+    for i in range(len(y_true)):
+        true_label = y_true[i]
+        pred_label = y_pred[i]
+        confusion[true_label, pred_label] += 1
+
+    return confusion
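Rows index the true class, columns the predicted class. A quick check with illustrative labels:

    cm = confusion_matrix(np.array([0, 1, 1, 2]), np.array([0, 2, 1, 2]), class_count=3)
    # cm[1, 2] == 1: one class-1 sample was predicted as class 2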
+

+def plot_evaluate(y_test, y_preds, acc_list):
+
+    acc = acc_list[len(acc_list) - 1]
+    y_true = decode_one_hot(y_test)
+
+    y_true = np.array(y_true)
+    y_preds = np.array(y_preds)
+    Class = np.unique(decode_one_hot(y_test))
+
+    precision, recall, f1 = metrics(y_test, y_preds)
+
+    # Confusion matrix
+    cm = confusion_matrix(y_true, y_preds, len(Class))
+    # Arrange the panels in a 2x2 subplot grid
+    fig, axs = plt.subplots(2, 2, figsize=(16, 12))
+
+    # Confusion Matrix
+    sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
+    axs[0, 0].set_title("Confusion Matrix")
+    axs[0, 0].set_xlabel("Predicted Class")
+    axs[0, 0].set_ylabel("Actual Class")
+
+    if len(Class) == 2:
+        fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+        # ROC Curve
+        roc_auc = np.trapz(tpr, fpr)
+        axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+        axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+        axs[1, 0].set_xlim([0.0, 1.0])
+        axs[1, 0].set_ylim([0.0, 1.05])
+        axs[1, 0].set_xlabel('False Positive Rate')
+        axs[1, 0].set_ylabel('True Positive Rate')
+        axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+        axs[1, 0].legend(loc="lower right")
+    else:
+
+        for i in range(len(Class)):
+
+            y_true_copy = np.copy(y_true)
+            y_preds_copy = np.copy(y_preds)
+
+            y_true_copy[y_true_copy == i] = 0
+            y_true_copy[y_true_copy != 0] = 1
+
+            y_preds_copy[y_preds_copy == i] = 0
+            y_preds_copy[y_preds_copy != 0] = 1
+
+            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+            roc_auc = np.trapz(tpr, fpr)
+            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            axs[1, 0].set_xlim([0.0, 1.0])
+            axs[1, 0].set_ylim([0.0, 1.05])
+            axs[1, 0].set_xlabel('False Positive Rate')
+            axs[1, 0].set_ylabel('True Positive Rate')
+            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+            axs[1, 0].legend(loc="lower right")

-        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(TrainInputs)) + " to: " + str(len(BalancedInputs)) + ". from: AutoBalancer " + Style.RESET_ALL)
-        time.sleep(1.5)
-    except:
-        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoAutoBalancer)
-        return 'e'

+    """
+    accuracy_per_class = []
+
+    for cls in Class:
+        correct = np.sum((y_true == cls) & (y_preds == cls))
+        total = np.sum(y_true == cls)
+        accuracy_cls = correct / total if total > 0 else 0.0
+        accuracy_per_class.append(accuracy_cls)
+
+    axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+    axs[2, 0].set_xlabel('Class')
+    axs[2, 0].set_ylabel('Accuracy')
+    axs[2, 0].set_title('Class-wise Accuracy')
+    axs[2, 0].set_xticks(Class)
+    axs[2, 0].grid(True)
+    """
+
+    # Precision, Recall, F1 Score, Accuracy
+    metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
+    values = [precision, recall, f1, acc]
+    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
+
+    bars = axs[0, 1].bar(metric, values, color=colors)
+
+    for bar, value in zip(bars, values):
+        axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
+                       ha='center', va='bottom', fontsize=12, color='white', weight='bold')
+
+    axs[0, 1].set_ylim(0, 1)  # Limit the y-axis to the 0-1 range
+    axs[0, 1].set_xlabel('Metrics')
+    axs[0, 1].set_ylabel('Score')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
+    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
+
+    # Accuracy
+    plt.plot(acc_list, marker='o', linestyle='-',
+             color='r', label='Accuracy')
+
+    plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
+
+    plt.xlabel('Samples')
+    plt.ylabel('Accuracy')
+    plt.title('Accuracy History')
+    plt.legend()
+
+    plt.tight_layout()
+    plt.show()
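A hedged usage sketch; test_preds and acc_list would normally come from the package's evaluate step, which is outside this hunk, so all three argument names are assumptions:

    # y_test_one_hot: one-hot ground truth; test_preds: integer predictions;
    # acc_list: per-sample accuracy history collected during evaluation.
    plot_evaluate(y_test_one_hot, test_preds, acc_list)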
+
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+    x_train -- Input dataset (examples) - NumPy array format
+    y_train -- Class labels (one-hot encoded) - NumPy array format
+    target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+    x_balanced -- Balanced input dataset (NumPy array format)
+    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)
+
+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
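Unlike synthetic_augmentation, which grows every class to the majority count, this variant targets an explicit size. A one-line sketch (the target value is illustrative):

    # Undersample large classes and interpolate new samples for small ones
    # until every class holds exactly 500 examples.
    x_bal, y_bal = manuel_balancer(x_train, y_train, target_samples_per_class=500)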
+

+def get_weights():
+
+    return 0
+
+
+def get_df():
+
+    return 2
+
+
+def get_preds():
+
+    return 1
+
+
+def get_acc():
+
+    return 2