pyerualjetwork 1.2.9__py3-none-any.whl → 2.5.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/__init__.py +1 -1
- plan/plan.py +1420 -817
- pyerualjetwork-2.5.8.dist-info/METADATA +8 -0
- pyerualjetwork-2.5.8.dist-info/RECORD +6 -0
- pyerualjetwork-1.2.9.dist-info/METADATA +0 -17
- pyerualjetwork-1.2.9.dist-info/RECORD +0 -6
- {pyerualjetwork-1.2.9.dist-info → pyerualjetwork-2.5.8.dist-info}/WHEEL +0 -0
- {pyerualjetwork-1.2.9.dist-info → pyerualjetwork-2.5.8.dist-info}/top_level.txt +0 -0
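The bulk of the release is a rewrite of plan/plan.py: the 1.2.9 CamelCase API (TrainPLAN, SynapticPruning, Fex, Cat, SavePLAN, LoadPLAN, PredictFromDiscPLAN) is replaced by a snake_case API (fit, fex, evaluate, multiple_evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, auto_balancer), and the per-layer MembranThresholds/MembranPotentials configuration is collapsed into a single optional activation_potential float. A minimal sketch of the new 2.5.8 call surface shown in the diff below; the `plan` import path and the toy data shapes are assumptions, not part of the diff:

    import numpy as np
    from plan import plan  # assumed import path for the installed package

    # Toy one-hot dataset: 20 samples, 16 features, 2 classes (assumed shapes).
    x_train = [np.random.rand(16) for _ in range(20)]
    y_train = [np.eye(2)[i % 2] for i in range(20)]

    # fit() replaces 1.2.9's TrainPLAN and returns the trained weight
    # matrix list (a 'fex' matrix plus an appended identity 'cat' matrix).
    trained_W = plan.fit(x_train, y_train, show_training=None)

    # evaluate() returns (W, y_preds, acc); show_metrices=False skips plotting.
    W, y_preds, acc = plan.evaluate(x_train, y_train, False, trained_W)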
plan/plan.py
CHANGED
@@ -1,6 +1,34 @@
|
|
1
|
+
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
"""
|
4
|
+
Created on Tue Jun 18 23:32:16 2024
|
5
|
+
|
6
|
+
@author: hasan
|
7
|
+
"""
|
8
|
+
|
9
|
+
# optic
|
10
|
+
|
11
|
+
# -*- coding: utf-8 -*-
|
12
|
+
"""
|
13
|
+
Created on Fri Jun 21 05:21:35 2024
|
14
|
+
|
15
|
+
@author: hasan
|
16
|
+
"""
|
17
|
+
|
18
|
+
# -*- coding: utf-8 -*-
|
19
|
+
|
20
|
+
|
21
|
+
|
22
|
+
"""
|
23
|
+
Created on Thu Jun 12 00:00:00 2024
|
24
|
+
|
25
|
+
@author: hasan can beydili
|
26
|
+
"""
|
27
|
+
|
28
|
+
import pandas as pd
|
1
29
|
import numpy as np
|
2
30
|
import time
|
3
|
-
from colorama import Fore,Style
|
31
|
+
from colorama import Fore, Style
|
4
32
|
from typing import List, Union
|
5
33
|
import math
|
6
34
|
from scipy.special import expit, softmax
|
@@ -8,1088 +36,1663 @@ import matplotlib.pyplot as plt
|
|
8
36
|
import seaborn as sns
|
9
37
|
|
10
38
|
# BUILD -----
|
11
|
-
|
12
|
-
|
13
|
-
|
14
|
-
|
15
|
-
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
39
|
+
|
40
|
+
|
41
|
+
def fit(
|
42
|
+
x_train: List[Union[int, float]],
|
43
|
+
y_train: List[Union[int, float]], # At least two.. and one hot encoded
|
44
|
+
show_training,
|
45
|
+
show_count= None,
|
46
|
+
val_count= None,
|
47
|
+
val= None,
|
48
|
+
x_val= None,
|
49
|
+
y_val= None,
|
50
|
+
activation_potential=None # (float): Input activation potential (optional)
|
22
51
|
) -> str:
|
23
|
-
|
52
|
+
|
24
53
|
infoPLAN = """
|
25
54
|
Creates and configures a PLAN model.
|
26
55
|
|
27
56
|
Args:
|
28
|
-
|
29
|
-
|
30
|
-
|
31
|
-
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
Activations (list[str]): List of activation functions.
|
37
|
-
Visualize (str): Visualize Training procces or not visualize ('y' or 'n')
|
38
|
-
|
57
|
+
x_train (list[num]): List of input data.
|
58
|
+
y_train (list[num]): List of target labels. (one hot encoded)
|
59
|
+
show_training (bool, str): True, None or'final'
|
60
|
+
show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
|
61
|
+
val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
|
62
|
+
x_val (list[num]): List of validation data. (optional) Default: x_train
|
63
|
+
y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
|
64
|
+
activation_potential (float): Input activation potential (for binary injection) (optional) in range: -1, 1
|
39
65
|
Returns:
|
40
|
-
list([num]): (Weight matrices list,
|
66
|
+
list([num]): (Weight matrices list, train_predictions list, Train_acc).
|
41
67
|
error handled ?: Process status ('e')
|
42
68
|
"""
|
43
|
-
|
44
|
-
if Visualize != 'y' and Visualize != 'n':
|
45
|
-
print(Fore.RED + "ERROR109: Visualize parameter must be 'y' or 'n'. TrainPLAN",infoPLAN)
|
46
|
-
return 'e'
|
47
|
-
|
48
|
-
|
49
|
-
LastNeuron = Neurons[-1:][0]
|
50
|
-
if LastNeuron != ClassCount:
|
51
|
-
print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
|
52
|
-
return 'e'
|
53
|
-
|
54
|
-
if len(Normalizations) != len(MembranPotentials):
|
55
|
-
|
56
|
-
print(Fore.RED + "ERROR307: Normalization list length must be equal to length of MembranThresholds List,MembranPotentials List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
|
57
|
-
return 'e'
|
58
|
-
|
59
|
-
if len(TrainInputs) != len(TrainLabels):
|
60
|
-
print(Fore.RED + "ERROR301: TrainInputs list and TrainLabels list must be same length.",infoPLAN)
|
61
|
-
return 'e'
|
62
|
-
|
63
|
-
for i, Value in enumerate(MembranPotentials):
|
64
|
-
|
65
|
-
if Normalizations[i] != 'y' and Normalizations[i] != 'n':
|
66
|
-
print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
|
67
|
-
return 'e'
|
68
|
-
|
69
|
-
if MembranThresholds[i] == 'none':
|
70
|
-
print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
|
71
|
-
time.sleep(3)
|
72
|
-
|
73
|
-
if isinstance(Value, str):
|
74
|
-
print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
|
75
|
-
return 'e'
|
76
|
-
|
77
|
-
if isinstance(Neurons[i], str):
|
78
|
-
print(Fore.RED + "ERROR202: Neurons list must be numeric.")
|
79
|
-
return 'e'
|
80
|
-
|
81
|
-
if len(MembranThresholds) != len(MembranPotentials):
|
82
|
-
print(Fore.RED + "ERROR302: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
|
83
|
-
return 'e'
|
84
|
-
|
85
|
-
if len(Layers) != len(Neurons):
|
86
|
-
print(Fore.RED + "ERROR303: Layers list and Neurons list must same length. from: TrainPLAN",infoPLAN)
|
87
|
-
return 'e'
|
88
|
-
|
89
|
-
if len(MembranPotentials) != len(Layers) or len(MembranThresholds) != len(Layers):
|
90
|
-
print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same Layers list length. from: TrainPLAN",infoPLAN)
|
91
|
-
return 'e'
|
92
|
-
|
93
|
-
|
94
|
-
for Activation in Activations:
|
95
|
-
if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
|
96
|
-
print(Fore.RED + "ERROR108: Activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
|
97
|
-
return 'e'
|
98
|
-
|
69
|
+
|
99
70
|
|
100
|
-
|
101
|
-
|
102
|
-
|
103
|
-
return 'e'
|
104
|
-
|
105
|
-
if index + 1 != len(Neurons) and Neuron % 2 != 0:
|
106
|
-
print(Fore.MAGENTA + "WARNING101: We strongly advise to do Neuron counts be should even numbers. from: TrainPLAN",infoPLAN)
|
107
|
-
time.sleep(3)
|
108
|
-
|
109
|
-
if Neuron < ClassCount:
|
110
|
-
print(Fore.RED + "ERROR102: Neuron count must be greater than class count(For PLAN). from: TrainPLAN")
|
111
|
-
return 'e'
|
112
|
-
|
113
|
-
if Layers[index] != 'fex' and Layers[index] != 'cat':
|
114
|
-
print(Fore.RED + "ERROR107: Layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
|
115
|
-
return 'e'
|
116
|
-
|
117
|
-
if len(MembranThresholds) != len(MembranPotentials):
|
118
|
-
print(Fore.RED + "ERROR305: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
|
71
|
+
if len(x_train) != len(y_train):
|
72
|
+
|
73
|
+
print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
|
119
74
|
return 'e'
|
120
75
|
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
76
|
+
if x_val == None and y_val == None:
|
77
|
+
|
78
|
+
x_val = x_train
|
79
|
+
y_val = y_train
|
80
|
+
|
81
|
+
if val == True and val_count == None:
|
82
|
+
|
83
|
+
val_count = 0.1
|
84
|
+
val_count_copy = val_count
|
85
|
+
|
86
|
+
if val == True:
|
126
87
|
|
127
|
-
|
128
|
-
|
129
|
-
|
88
|
+
val_count = int(len(x_train) * val_count)
|
89
|
+
|
90
|
+
val_count_copy = val_count
|
91
|
+
|
92
|
+
if show_training == True or show_training == 'final' and show_count == None:
|
130
93
|
|
131
|
-
|
132
|
-
|
133
|
-
|
134
|
-
|
135
|
-
|
136
|
-
|
137
|
-
UniqueTrainLabels = list(UniqueTrainLabels)
|
138
|
-
|
139
|
-
TrainLabels = [tuple(sublist) for sublist in TrainLabels]
|
140
|
-
|
141
|
-
|
142
|
-
if len(UniqueTrainLabels) != ClassCount:
|
143
|
-
print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
|
144
|
-
return 'e'
|
145
|
-
|
146
|
-
TrainInputs[0] = np.array(TrainInputs[0])
|
147
|
-
TrainInputs[0] = TrainInputs[0].ravel()
|
148
|
-
TrainInputsize = len(TrainInputs[0])
|
94
|
+
show_count = 10
|
95
|
+
|
96
|
+
if show_training == True or show_training == 'final':
|
97
|
+
|
98
|
+
row, col = shape_control(x_train)
|
149
99
|
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
100
|
+
class_count = set()
|
101
|
+
|
102
|
+
for sublist in y_train:
|
103
|
+
|
104
|
+
class_count.add(tuple(sublist))
|
105
|
+
|
106
|
+
class_count = list(class_count)
|
107
|
+
|
108
|
+
y_train = [tuple(sublist) for sublist in y_train]
|
109
|
+
|
110
|
+
neurons = [len(class_count), len(class_count)]
|
111
|
+
layers = ['fex']
|
112
|
+
val_list = [None]
|
113
|
+
|
114
|
+
x_train[0] = np.array(x_train[0])
|
115
|
+
x_train[0] = x_train[0].ravel()
|
116
|
+
x_train_size = len(x_train[0])
|
117
|
+
|
118
|
+
W = weight_identification(
|
119
|
+
len(layers) - 1, len(class_count), neurons, x_train_size)
|
120
|
+
|
121
|
+
trained_W = [1] * len(W)
|
122
|
+
print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
|
123
|
+
start_time = time.time()
|
124
|
+
y = decode_one_hot(y_train)
|
125
|
+
|
126
|
+
for index, inp in enumerate(x_train):
|
127
|
+
|
128
|
+
progress = index / len(x_train) * 100
|
129
|
+
|
130
|
+
uni_start_time = time.time()
|
159
131
|
inp = np.array(inp)
|
160
132
|
inp = inp.ravel()
|
161
133
|
|
162
|
-
if
|
163
|
-
print(Fore.RED +"ERROR304: All input matrices or vectors in
|
134
|
+
if x_train_size != len(inp):
|
135
|
+
print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
|
136
|
+
infoPLAN + Style.RESET_ALL)
|
164
137
|
return 'e'
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
NeuralLayer = inp
|
179
|
-
|
180
|
-
for Lindex, Layer in enumerate(Layers):
|
138
|
+
|
139
|
+
neural_layer = inp
|
140
|
+
|
141
|
+
for Lindex, Layer in enumerate(layers):
|
142
|
+
|
143
|
+
neural_layer = normalization(neural_layer)
|
144
|
+
|
145
|
+
if Layer == 'fex':
|
146
|
+
W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
|
147
|
+
|
148
|
+
for i, w in enumerate(W):
|
149
|
+
trained_W[i] = trained_W[i] + w
|
150
|
+
|
181
151
|
|
182
|
-
|
183
|
-
|
152
|
+
if val == True:
|
153
|
+
|
154
|
+
if index == val_count:
|
184
155
|
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
156
|
+
val_count += val_count_copy
|
157
|
+
|
158
|
+
layers.append('cat')
|
159
|
+
trained_W.append(np.eye(len(class_count)))
|
160
|
+
|
161
|
+
validation_model = evaluate(x_val, y_val, None, trained_W)
|
162
|
+
|
163
|
+
layers.pop()
|
164
|
+
trained_W.pop()
|
165
|
+
|
166
|
+
val_acc = validation_model[get_acc()]
|
167
|
+
|
168
|
+
val_list.append(val_acc)
|
169
|
+
|
191
170
|
|
192
|
-
|
193
|
-
|
194
|
-
elif Layer == 'cat':
|
195
|
-
NeuralLayer,W[Lindex] = Cat(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex],1)
|
171
|
+
plt.plot(val_list, linestyle='-',
|
172
|
+
color='r')
|
196
173
|
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
if index + 1 != len(TrainInputs):
|
219
|
-
|
220
|
-
plt.close('all')
|
221
|
-
|
222
|
-
if index == 0:
|
223
|
-
for i, w in enumerate(W):
|
224
|
-
TrainedWs[i] = w
|
225
|
-
|
226
|
-
else:
|
227
|
-
for i, w in enumerate(W):
|
228
|
-
TrainedWs[i] = TrainedWs[i] + w
|
229
|
-
|
174
|
+
progress_status = f"{progress:.1f}"
|
175
|
+
plt.title('Validation accuracy graph. Amount of data learned by the model: % ' + progress_status)
|
176
|
+
plt.xlabel('Learning Progress')
|
177
|
+
plt.ylabel('Accuracy')
|
178
|
+
plt.ylim(0, 1)
|
179
|
+
plt.draw()
|
180
|
+
plt.pause(0.1)
|
181
|
+
|
182
|
+
if show_training == True:
|
183
|
+
|
184
|
+
if index %show_count == 0:
|
185
|
+
|
186
|
+
|
187
|
+
if index != 0:
|
188
|
+
plt.close(fig)
|
189
|
+
|
190
|
+
if row != 0:
|
191
|
+
|
192
|
+
fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
|
193
|
+
|
194
|
+
else:
|
230
195
|
|
231
|
-
|
232
|
-
|
196
|
+
fig, ax = plt.subplots(1, 1, figsize=(18, 14))
|
197
|
+
|
198
|
+
for j in range(len(class_count)):
|
199
|
+
|
200
|
+
|
201
|
+
if row != 0:
|
202
|
+
|
203
|
+
mat = trained_W[0][j,:].reshape(row, col)
|
204
|
+
suptitle_info = 'Neurons Learning Progress: % '
|
205
|
+
title_info = f'{j+1}. Neuron'
|
206
|
+
|
207
|
+
mat = trained_W[0][j,:].reshape(row, col)
|
208
|
+
|
209
|
+
ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
|
210
|
+
|
211
|
+
ax[j].set_aspect('equal')
|
212
|
+
|
213
|
+
ax[j].set_xticks([])
|
214
|
+
ax[j].set_yticks([])
|
215
|
+
ax[j].set_title(title_info)
|
216
|
+
|
217
|
+
else:
|
218
|
+
|
219
|
+
mat = trained_W[0]
|
220
|
+
ax.imshow(mat, interpolation='sinc', cmap='viridis')
|
221
|
+
suptitle_info = 'Weight Learning Progress: % '
|
222
|
+
title_info = 'Weight Matrix Of Fex Layer'
|
223
|
+
|
224
|
+
|
233
225
|
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
226
|
+
|
227
|
+
progress_status = f"{progress:.1f}"
|
228
|
+
fig.suptitle(suptitle_info + progress_status)
|
229
|
+
plt.draw()
|
230
|
+
plt.pause(0.1)
|
231
|
+
|
232
|
+
|
233
|
+
W = weight_identification(
|
234
|
+
len(layers) - 1, len(class_count), neurons, x_train_size)
|
235
|
+
|
236
|
+
uni_end_time = time.time()
|
237
|
+
|
238
|
+
calculating_est = round(
|
239
|
+
(uni_end_time - uni_start_time) * (len(x_train) - index), 3)
|
240
|
+
|
241
|
+
if calculating_est < 60:
|
242
|
+
print('\rest......(sec):', calculating_est, '\n', end="")
|
243
|
+
|
244
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
245
|
+
print('\rest......(min):', calculating_est/60, '\n', end="")
|
246
|
+
|
247
|
+
elif calculating_est > 3600:
|
248
|
+
print('\rest......(h):', calculating_est/3600, '\n', end="")
|
249
|
+
|
250
|
+
print('\rTraining: ', index, "/", len(x_train), "\n", end="")
|
251
|
+
|
252
|
+
if show_training == 'final':
|
253
|
+
|
254
|
+
fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
|
255
|
+
|
256
|
+
for j in range(len(class_count)):
|
257
|
+
|
258
|
+
mat = trained_W[0][j,:].reshape(row, col)
|
259
|
+
|
260
|
+
ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
|
261
|
+
ax[j].set_aspect('equal')
|
262
|
+
|
263
|
+
ax[j].set_xticks([])
|
264
|
+
ax[j].set_yticks([])
|
265
|
+
ax[j].set_title(f'{j+1}. Neuron')
|
266
|
+
|
267
|
+
progress_status = f"{progress:.1f}"
|
268
|
+
fig.suptitle('Neurons Learning Progress: % ' + progress_status)
|
269
|
+
plt.draw()
|
270
|
+
plt.pause(0.1)
|
271
|
+
|
272
|
+
|
250
273
|
EndTime = time.time()
|
274
|
+
|
275
|
+
calculating_est = round(EndTime - start_time, 2)
|
276
|
+
|
277
|
+
print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n" + Style.RESET_ALL)
|
278
|
+
|
279
|
+
if calculating_est < 60:
|
280
|
+
print('Total training time(sec): ', calculating_est)
|
281
|
+
|
282
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
283
|
+
print('Total training time(min): ', calculating_est/60)
|
284
|
+
|
285
|
+
elif calculating_est > 3600:
|
286
|
+
print('Total training time(h): ', calculating_est/3600)
|
251
287
|
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
288
|
+
layers.append('cat')
|
289
|
+
trained_W.append(np.eye(len(class_count)))
|
290
|
+
|
291
|
+
|
292
|
+
return trained_W
|
293
|
+
|
294
|
+
# FUNCTIONS -----
|
295
|
+
|
296
|
+
def shape_control(x_train):
|
297
|
+
|
298
|
+
try:
|
299
|
+
row = x_train[1].shape[0]
|
300
|
+
col = x_train[1].shape[1]
|
258
301
|
|
259
|
-
|
260
|
-
|
302
|
+
except:
|
303
|
+
|
304
|
+
print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
|
261
305
|
|
262
|
-
|
263
|
-
|
306
|
+
try:
|
307
|
+
row, col = find_numbers(len(x_train[0]))
|
308
|
+
|
309
|
+
except:
|
264
310
|
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
elif Acc < 0.8 and Acc > 0.6:
|
269
|
-
print(Fore.MAGENTA + '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
|
270
|
-
|
271
|
-
elif Acc < 0.6:
|
272
|
-
print(Fore.RED+ '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
return TrainedWs,TrainPredictions,Acc
|
311
|
+
print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
|
312
|
+
return [0, 0]
|
278
313
|
|
279
|
-
|
314
|
+
return row, col
|
280
315
|
|
281
|
-
def
|
282
|
-
|
283
|
-
|
284
|
-
Neurons, # list[num]: List of neuron counts for each layer.
|
285
|
-
TrainInputsize # int: Size of the input data.
|
286
|
-
) -> str:
|
287
|
-
"""
|
288
|
-
Identifies the weights for a neural network model.
|
289
|
-
|
290
|
-
Args:
|
291
|
-
LayerCount (int): Number of layers in the neural network.
|
292
|
-
ClassCount (int): Number of classes in the classification task.
|
293
|
-
Neurons (list[num]): List of neuron counts for each layer.
|
294
|
-
TrainInputsize (int): Size of the input data.
|
316
|
+
def find_numbers(n):
|
317
|
+
if n <= 1:
|
318
|
+
raise ValueError("Parameter 'n' must be greater than 1.")
|
295
319
|
|
296
|
-
|
297
|
-
|
298
|
-
|
320
|
+
for i in range(2, int(n**0.5) + 1):
|
321
|
+
if n % i == 0:
|
322
|
+
factor1 = i
|
323
|
+
factor2 = n // i
|
324
|
+
if factor1 == factor2:
|
325
|
+
return factor1, factor2
|
299
326
|
|
300
|
-
|
301
|
-
Wlen = LayerCount + 1
|
302
|
-
W = [None] * Wlen
|
303
|
-
W[0] = np.ones((Neurons[0],TrainInputsize))
|
304
|
-
ws = LayerCount - 1
|
305
|
-
for w in range(ws):
|
306
|
-
W[w + 1] = np.ones((Neurons[w + 1],Neurons[w]))
|
307
|
-
W[LayerCount] = np.ones((ClassCount,Neurons[LayerCount - 1]))
|
308
|
-
return W
|
327
|
+
return None
|
309
328
|
|
310
|
-
def
|
311
|
-
|
312
|
-
|
313
|
-
Key, # int: Key for identifying synaptic connections.
|
314
|
-
Class, # int: Class label for the current training instance.
|
315
|
-
ClassCount # int: Total number of classes in the dataset.
|
316
|
-
|
329
|
+
def weight_normalization(
|
330
|
+
W,
|
331
|
+
class_count
|
317
332
|
) -> str:
|
318
|
-
|
319
|
-
|
333
|
+
"""
|
334
|
+
Row(Neuron) based normalization. For unbalanced models.
|
320
335
|
|
321
336
|
Args:
|
322
|
-
|
323
|
-
|
324
|
-
Key (str): Key for identifying synaptic row or col connections.
|
325
|
-
Class (int): Class label for the current training instance.
|
326
|
-
ClassCount (int): Total number of classes in the dataset.
|
337
|
+
W (list(num)): Trained weight matrix list.
|
338
|
+
class_count (int): Class count of model.
|
327
339
|
|
328
340
|
Returns:
|
329
|
-
|
341
|
+
list([numpy_arrays],[...]): posttrained weight matices of the model. .
|
330
342
|
"""
|
331
343
|
|
332
|
-
|
333
|
-
Class += 1 # because index start 0
|
334
|
-
|
335
|
-
if Class != ClassCount and Class != 1:
|
336
|
-
|
337
|
-
Ce = Cs / Class
|
338
|
-
|
339
|
-
w[int(Ce)-1::-1,:] = 0
|
340
|
-
|
341
|
-
w[Cs:,:] = 0
|
342
|
-
|
343
|
-
|
344
|
-
else:
|
344
|
+
for i in range(class_count):
|
345
345
|
|
346
|
-
|
347
|
-
if Key == 'row':
|
348
|
-
|
349
|
-
w[Cs:,:] = 0
|
350
|
-
|
351
|
-
elif Key == 'col':
|
352
|
-
|
353
|
-
w[:,Cs] = 0
|
354
|
-
|
355
|
-
else:
|
356
|
-
print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning)
|
357
|
-
return 'e'
|
358
|
-
else:
|
359
|
-
if Key == 'row':
|
360
|
-
|
361
|
-
w[Cs:,:] = 0
|
362
|
-
|
363
|
-
Ce = int(round(w.shape[0] - Cs / ClassCount))
|
364
|
-
w[Ce-1::-1,:] = 0
|
346
|
+
W[0][i,:] = normalization(W[0][i,:])
|
365
347
|
|
366
|
-
|
367
|
-
|
368
|
-
w[:,Cs] = 0
|
369
|
-
|
370
|
-
else:
|
371
|
-
print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning + Style.RESET_ALL)
|
372
|
-
return 'e'
|
373
|
-
return w
|
348
|
+
return W
|
374
349
|
|
375
|
-
def
|
376
|
-
|
377
|
-
|
350
|
+
def weight_identification(
|
351
|
+
layer_count, # int: Number of layers in the neural network.
|
352
|
+
class_count, # int: Number of classes in the classification task.
|
353
|
+
neurons, # list[num]: List of neuron counts for each layer.
|
354
|
+
x_train_size # int: Size of the input data.
|
378
355
|
) -> str:
|
379
356
|
"""
|
380
|
-
|
357
|
+
Identifies the weights for a neural network model.
|
381
358
|
|
382
359
|
Args:
|
383
|
-
|
384
|
-
|
360
|
+
layer_count (int): Number of layers in the neural network.
|
361
|
+
class_count (int): Number of classes in the classification task.
|
362
|
+
neurons (list[num]): List of neuron counts for each layer.
|
363
|
+
x_train_size (int): Size of the input data.
|
385
364
|
|
386
365
|
Returns:
|
387
|
-
list:
|
366
|
+
list([numpy_arrays],[...]): pretrained weight matices of the model. .
|
388
367
|
"""
|
389
368
|
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
|
400
|
-
Piece[i] = int(math.floor(W[i].shape[0] / ClassCount))
|
401
|
-
|
402
|
-
Cs = 0
|
403
|
-
# j = Classes, i = Weights, [0] = CutStart.
|
369
|
+
Wlen = layer_count + 1
|
370
|
+
W = [None] * Wlen
|
371
|
+
W[0] = np.ones((neurons[0], x_train_size))
|
372
|
+
ws = layer_count - 1
|
373
|
+
for w in range(ws):
|
374
|
+
W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
|
375
|
+
|
376
|
+
return W
|
404
377
|
|
405
|
-
for i in range(len(W)):
|
406
|
-
for j in range(ClassCount):
|
407
|
-
Cs = Cs + Piece[i]
|
408
|
-
Divides[j][i][0] = Cs
|
409
|
-
#print('Divides: ' + j + i + ' = ' + Divides[j][i][0])
|
410
|
-
#input()
|
411
|
-
|
412
|
-
j = 0
|
413
|
-
Cs = 0
|
414
|
-
|
415
|
-
return Divides
|
416
|
-
|
417
378
|
|
418
|
-
def
|
379
|
+
def fex(
|
419
380
|
Input, # list[num]: Input data.
|
420
|
-
w, #
|
421
|
-
|
422
|
-
|
381
|
+
w, # num: Weight matrix of the neural network.
|
382
|
+
is_training, # bool: Flag indicating if the function is called during training (True or False).
|
383
|
+
Class, # int: Which class is, if training.
|
384
|
+
activation_potential # float or None: Input activation potential (optional)
|
423
385
|
) -> tuple:
|
424
386
|
"""
|
425
387
|
Applies feature extraction process to the input data using synaptic pruning.
|
426
388
|
|
427
389
|
Args:
|
428
|
-
Input (
|
429
|
-
w (
|
430
|
-
|
431
|
-
|
390
|
+
Input (num): Input data.
|
391
|
+
w (num): Weight matrix of the neural network.
|
392
|
+
is_training (bool): Flag indicating if the function is called during training (True or False).
|
393
|
+
Class (int): if is during training then which class(label) ? is isnt then put None.
|
394
|
+
activation_potential (float or None): Threshold value for comparison. (optional)
|
432
395
|
|
433
396
|
Returns:
|
434
397
|
tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
|
435
398
|
"""
|
436
399
|
|
437
|
-
if
|
438
|
-
|
439
|
-
|
440
|
-
PruneIndex = np.where(Input > MembranPotential)
|
441
|
-
elif MembranThreshold == '==':
|
442
|
-
PruneIndex = np.where(Input == MembranPotential)
|
443
|
-
elif MembranThreshold == '!=':
|
444
|
-
PruneIndex = np.where(Input != MembranPotential)
|
445
|
-
|
446
|
-
w = SynapticPruning(w, PruneIndex, 'col', 0, 0)
|
447
|
-
|
448
|
-
NeuralLayer = np.dot(w, Input)
|
449
|
-
return NeuralLayer,w
|
450
|
-
|
451
|
-
def Cat(
|
452
|
-
Input, # list[num]: Input data.
|
453
|
-
w, # list[list[num]]: Weight matrix of the neural network.
|
454
|
-
MembranThreshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
|
455
|
-
MembranPotential, # num: Threshold value for comparison.
|
456
|
-
isTrain # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
|
457
|
-
) -> tuple:
|
458
|
-
"""
|
459
|
-
Applies categorization process to the input data using synaptic pruning if specified.
|
460
|
-
|
461
|
-
Args:
|
462
|
-
Input (list[num]): Input data.
|
463
|
-
w (list[list[num]]): Weight matrix of the neural network.
|
464
|
-
MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
|
465
|
-
MembranPotential (num): Threshold value for comparison.
|
466
|
-
isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
|
400
|
+
if is_training == True and activation_potential == None:
|
401
|
+
|
402
|
+
w[Class, :] = Input
|
467
403
|
|
468
|
-
|
469
|
-
tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
|
470
|
-
"""
|
404
|
+
return w
|
471
405
|
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
406
|
+
elif is_training == True and activation_potential != None:
|
407
|
+
|
408
|
+
|
409
|
+
Input[Input < activation_potential] = 0
|
410
|
+
Input[Input > activation_potential] = 1
|
411
|
+
|
412
|
+
w[Class,:] = Input
|
413
|
+
|
414
|
+
return w
|
415
|
+
|
416
|
+
elif is_training == False and activation_potential == None:
|
417
|
+
|
418
|
+
neural_layer = np.dot(w, Input)
|
483
419
|
|
484
|
-
|
485
|
-
|
486
|
-
|
420
|
+
return neural_layer
|
421
|
+
|
422
|
+
elif is_training == False and activation_potential != None:
|
423
|
+
|
424
|
+
Input[Input < activation_potential] = 0
|
425
|
+
Input[Input > activation_potential] = 1
|
426
|
+
|
427
|
+
neural_layer = np.dot(w, Input)
|
428
|
+
|
429
|
+
return neural_layer
|
487
430
|
|
488
431
|
|
489
|
-
|
490
|
-
|
432
|
+
|
433
|
+
def normalization(
|
434
|
+
Input # num: Input data to be normalized.
|
491
435
|
):
|
492
436
|
"""
|
493
437
|
Normalizes the input data using maximum absolute scaling.
|
494
438
|
|
495
439
|
Args:
|
496
|
-
Input (
|
440
|
+
Input (num): Input data to be normalized.
|
497
441
|
|
498
442
|
Returns:
|
499
|
-
|
443
|
+
(num) Scaled input data after normalization.
|
500
444
|
"""
|
501
445
|
|
502
|
-
|
503
446
|
AbsVector = np.abs(Input)
|
504
|
-
|
447
|
+
|
505
448
|
MaxAbs = np.max(AbsVector)
|
506
|
-
|
449
|
+
|
507
450
|
ScaledInput = Input / MaxAbs
|
508
|
-
|
451
|
+
|
509
452
|
return ScaledInput
|
510
453
|
|
511
454
|
|
512
455
|
def Softmax(
|
513
|
-
x #
|
456
|
+
x # num: Input data to be transformed using softmax function.
|
514
457
|
):
|
515
458
|
"""
|
516
459
|
Applies the softmax function to the input data.
|
517
460
|
|
518
461
|
Args:
|
519
|
-
|
462
|
+
(num): Input data to be transformed using softmax function.
|
520
463
|
|
521
464
|
Returns:
|
522
|
-
|
465
|
+
(num): Transformed data after applying softmax function.
|
523
466
|
"""
|
524
|
-
|
467
|
+
|
525
468
|
return softmax(x)
|
526
469
|
|
527
470
|
|
528
471
|
def Sigmoid(
|
529
|
-
x #
|
472
|
+
x # num: Input data to be transformed using sigmoid function.
|
530
473
|
):
|
531
474
|
"""
|
532
475
|
Applies the sigmoid function to the input data.
|
533
476
|
|
534
477
|
Args:
|
535
|
-
|
478
|
+
(num): Input data to be transformed using sigmoid function.
|
536
479
|
|
537
480
|
Returns:
|
538
|
-
|
481
|
+
(num): Transformed data after applying sigmoid function.
|
539
482
|
"""
|
540
483
|
return expit(x)
|
541
484
|
|
542
485
|
|
543
486
|
def Relu(
|
544
|
-
x #
|
487
|
+
x # num: Input data to be transformed using ReLU function.
|
545
488
|
):
|
546
489
|
"""
|
547
490
|
Applies the Rectified Linear Unit (ReLU) function to the input data.
|
548
491
|
|
549
492
|
Args:
|
550
|
-
|
493
|
+
(num): Input data to be transformed using ReLU function.
|
551
494
|
|
552
495
|
Returns:
|
553
|
-
|
496
|
+
(num): Transformed data after applying ReLU function.
|
554
497
|
"""
|
555
498
|
|
556
|
-
|
557
499
|
return np.maximum(0, x)
|
558
500
|
|
559
501
|
|
560
|
-
def
|
561
|
-
|
562
|
-
|
563
|
-
|
564
|
-
|
565
|
-
|
566
|
-
Normalizations, # str: Whether normalization will be performed ("y" or "n").
|
567
|
-
Activations, # str: Activation function list for the neural network.
|
568
|
-
Visualize, # Visualize Testing procces or not visualize ('y' or 'n')
|
569
|
-
W # list[list[num]]: Weight matrix of the neural network.
|
502
|
+
def evaluate(
|
503
|
+
x_test, # list[num]: Test input data.
|
504
|
+
y_test, # list[num]: Test labels.
|
505
|
+
show_metrices, # show_metrices (bool): (True or False)
|
506
|
+
W, # list[num]: Weight matrix list of the neural network.
|
507
|
+
activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
|
570
508
|
) -> tuple:
|
571
|
-
infoTestModel =
|
509
|
+
infoTestModel = """
|
572
510
|
Tests the neural network model with the given test data.
|
573
511
|
|
574
512
|
Args:
|
575
|
-
|
576
|
-
|
577
|
-
|
578
|
-
|
579
|
-
|
580
|
-
Normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
|
581
|
-
Activation (str): Activation function for the neural network.
|
582
|
-
W (list[list[num]]): Weight matrix of the neural network.
|
513
|
+
x_test (list[num]): Test input data.
|
514
|
+
y_test (list[num]): Test labels.
|
515
|
+
show_metrices (bool): (True or False)
|
516
|
+
W (list[num]): Weight matrix list of the neural network.
|
517
|
+
activation_potential (float or None): Threshold value for comparison. (optional)
|
583
518
|
|
584
519
|
Returns:
|
585
520
|
tuple: A tuple containing the predicted labels and the accuracy of the model.
|
586
521
|
"""
|
587
522
|
|
523
|
+
layers = ['fex', 'cat']
|
588
524
|
|
589
525
|
try:
|
590
|
-
Wc = [0] * len(W)
|
526
|
+
Wc = [0] * len(W) # Wc = Weight copy
|
591
527
|
true = 0
|
592
|
-
|
528
|
+
y_preds = [-1] * len(y_test)
|
529
|
+
acc_list = []
|
530
|
+
|
593
531
|
for i, w in enumerate(W):
|
594
532
|
Wc[i] = np.copy(w)
|
595
|
-
print('\rCopying weights.....',i+1,'/',len(W),end
|
596
|
-
|
533
|
+
print('\rCopying weights.....', i+1, '/', len(W), end="")
|
534
|
+
|
597
535
|
print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
|
598
|
-
|
599
|
-
for inpIndex,Input in enumerate(
|
536
|
+
start_time = time.time()
|
537
|
+
for inpIndex, Input in enumerate(x_test):
|
600
538
|
Input = np.array(Input)
|
601
539
|
Input = Input.ravel()
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
for index, Layer in enumerate(
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
613
|
-
|
614
|
-
|
615
|
-
if Layers[index] == 'fex':
|
616
|
-
NeuralLayer,useless = Fex(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index])
|
617
|
-
if Layers[index] == 'cat':
|
618
|
-
NeuralLayer,useless = Cat(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index],0)
|
540
|
+
uni_start_time = time.time()
|
541
|
+
neural_layer = Input
|
542
|
+
|
543
|
+
for index, Layer in enumerate(layers):
|
544
|
+
|
545
|
+
neural_layer = normalization(neural_layer)
|
546
|
+
|
547
|
+
if Layer == 'fex':
|
548
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
|
549
|
+
elif Layer == 'cat':
|
550
|
+
neural_layer = np.dot(W[index], neural_layer)
|
551
|
+
|
619
552
|
for i, w in enumerate(Wc):
|
620
553
|
W[i] = np.copy(w)
|
621
|
-
RealOutput = np.argmax(
|
622
|
-
PredictedOutput = np.argmax(
|
554
|
+
RealOutput = np.argmax(y_test[inpIndex])
|
555
|
+
PredictedOutput = np.argmax(neural_layer)
|
623
556
|
if RealOutput == PredictedOutput:
|
624
557
|
true += 1
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
629
|
-
|
630
|
-
|
631
|
-
|
632
|
-
|
633
|
-
|
634
|
-
|
635
|
-
|
636
|
-
|
637
|
-
|
638
|
-
|
639
|
-
|
640
|
-
|
641
|
-
|
642
|
-
|
643
|
-
|
644
|
-
|
645
|
-
|
646
|
-
|
647
|
-
|
648
|
-
|
649
|
-
|
650
|
-
if CalculatingEst < 60:
|
651
|
-
print('\rest......(sec):',CalculatingEst,'\n',end= "")
|
652
|
-
print('\rTest Accuracy: ' ,Acc ,"\n", end="")
|
653
|
-
|
654
|
-
elif CalculatingEst > 60 and CalculatingEst < 3600:
|
655
|
-
print('\rest......(min):',CalculatingEst/60,'\n',end= "")
|
656
|
-
print('\rTest Accuracy: ' ,Acc ,"\n", end="")
|
657
|
-
|
658
|
-
elif CalculatingEst > 3600:
|
659
|
-
print('\rest......(h):',CalculatingEst/3600,'\n',end= "")
|
660
|
-
print('\rTest Accuracy: ' ,Acc ,"\n", end="")
|
661
|
-
|
558
|
+
acc = true / len(y_test)
|
559
|
+
if show_metrices == True:
|
560
|
+
acc_list.append(acc)
|
561
|
+
y_preds[inpIndex] = PredictedOutput
|
562
|
+
|
563
|
+
uni_end_time = time.time()
|
564
|
+
|
565
|
+
calculating_est = round(
|
566
|
+
(uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
|
567
|
+
|
568
|
+
if calculating_est < 60:
|
569
|
+
print('\rest......(sec):', calculating_est, '\n', end="")
|
570
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
571
|
+
|
572
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
573
|
+
print('\rest......(min):', calculating_est/60, '\n', end="")
|
574
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
575
|
+
|
576
|
+
elif calculating_est > 3600:
|
577
|
+
print('\rest......(h):', calculating_est/3600, '\n', end="")
|
578
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
579
|
+
if show_metrices == True:
|
580
|
+
plot_evaluate(y_test, y_preds, acc_list)
|
581
|
+
|
662
582
|
EndTime = time.time()
|
663
583
|
for i, w in enumerate(Wc):
|
664
584
|
W[i] = np.copy(w)
|
665
|
-
|
666
|
-
|
667
|
-
|
585
|
+
|
586
|
+
calculating_est = round(EndTime - start_time, 2)
|
587
|
+
|
668
588
|
print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
|
669
|
-
|
670
|
-
if CalculatingEst < 60:
|
671
|
-
print('Total testing time(sec): ',CalculatingEst)
|
672
|
-
|
673
|
-
elif CalculatingEst > 60 and CalculatingEst < 3600:
|
674
|
-
print('Total testing time(min): ',CalculatingEst/60)
|
675
|
-
|
676
|
-
elif CalculatingEst > 3600:
|
677
|
-
print('Total testing time(h): ',CalculatingEst/3600)
|
678
|
-
|
679
|
-
if Acc >= 0.8:
|
680
|
-
print(Fore.GREEN + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
681
|
-
|
682
|
-
elif Acc < 0.8 and Acc > 0.6:
|
683
|
-
print(Fore.MAGENTA + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
684
|
-
|
685
|
-
elif Acc <= 0.6:
|
686
|
-
print(Fore.RED+ '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
687
589
|
|
590
|
+
if calculating_est < 60:
|
591
|
+
print('Total testing time(sec): ', calculating_est)
|
592
|
+
|
593
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
594
|
+
print('Total testing time(min): ', calculating_est/60)
|
595
|
+
|
596
|
+
elif calculating_est > 3600:
|
597
|
+
print('Total testing time(h): ', calculating_est/3600)
|
598
|
+
|
599
|
+
if acc >= 0.8:
|
600
|
+
print(Fore.GREEN + '\nTotal Test accuracy: ',
|
601
|
+
acc, '\n' + Style.RESET_ALL)
|
602
|
+
|
603
|
+
elif acc < 0.8 and acc > 0.6:
|
604
|
+
print(Fore.MAGENTA + '\nTotal Test accuracy: ',
|
605
|
+
acc, '\n' + Style.RESET_ALL)
|
606
|
+
|
607
|
+
elif acc <= 0.6:
|
608
|
+
print(Fore.RED + '\nTotal Test accuracy: ', acc, '\n' + Style.RESET_ALL)
|
609
|
+
|
688
610
|
except:
|
611
|
+
|
612
|
+
print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
|
613
|
+
infoTestModel + Style.RESET_ALL)
|
614
|
+
return 'e'
|
615
|
+
|
616
|
+
return W, y_preds, acc
|
617
|
+
|
618
|
+
|
619
|
+
def multiple_evaluate(
|
620
|
+
x_test, # list[num]: Test input data.
|
621
|
+
y_test, # list[num]: Test labels.
|
622
|
+
show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
|
623
|
+
MW, # list[list[num]]: Weight matrix of the neural network.
|
624
|
+
activation_potential=None # (float or None): Threshold value for comparison. (optional)
|
625
|
+
) -> tuple:
|
626
|
+
infoTestModel = """
|
627
|
+
Tests the neural network model with the given test data.
|
628
|
+
|
629
|
+
Args:
|
630
|
+
x_test (list[num]): Test input data.
|
631
|
+
y_test (list[num]): Test labels.
|
632
|
+
show_metrices (bool): (True or False)
|
633
|
+
MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
|
634
|
+
|
635
|
+
Returns:
|
636
|
+
tuple: A tuple containing the predicted labels and the accuracy of the model.
|
637
|
+
"""
|
638
|
+
|
639
|
+
layers = ['fex', 'cat']
|
640
|
+
|
641
|
+
try:
|
642
|
+
y_preds = [-1] * len(y_test)
|
643
|
+
acc_list = []
|
644
|
+
print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
|
645
|
+
start_time = time.time()
|
646
|
+
true = 0
|
647
|
+
for inpIndex, Input in enumerate(x_test):
|
648
|
+
|
649
|
+
output_layer = 0
|
650
|
+
|
651
|
+
for m, Model in enumerate(MW):
|
652
|
+
|
653
|
+
W = Model
|
654
|
+
|
655
|
+
Wc = [0] * len(W) # Wc = weight copy
|
656
|
+
|
657
|
+
y_preds = [None] * len(y_test)
|
658
|
+
for i, w in enumerate(W):
|
659
|
+
Wc[i] = np.copy(w)
|
660
|
+
|
661
|
+
Input = np.array(Input)
|
662
|
+
Input = Input.ravel()
|
663
|
+
uni_start_time = time.time()
|
664
|
+
neural_layer = Input
|
665
|
+
|
666
|
+
for index, Layer in enumerate(layers):
|
667
|
+
|
668
|
+
neural_layer = normalization(neural_layer)
|
669
|
+
|
670
|
+
if Layer == 'fex':
|
671
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
|
672
|
+
elif Layer == 'cat':
|
673
|
+
neural_layer = np.dot(W[index], neural_layer)
|
674
|
+
|
675
|
+
output_layer += neural_layer
|
676
|
+
|
677
|
+
for i, w in enumerate(Wc):
|
678
|
+
W[i] = np.copy(w)
|
679
|
+
for i, w in enumerate(Wc):
|
680
|
+
W[i] = np.copy(w)
|
681
|
+
RealOutput = np.argmax(y_test[inpIndex])
|
682
|
+
PredictedOutput = np.argmax(output_layer)
|
683
|
+
if RealOutput == PredictedOutput:
|
684
|
+
true += 1
|
685
|
+
acc = true / len(y_test)
|
686
|
+
if show_metrices == True:
|
687
|
+
acc_list.append(acc)
|
688
|
+
y_preds[inpIndex] = PredictedOutput
|
689
|
+
|
690
|
+
|
691
|
+
uni_end_time = time.time()
|
692
|
+
|
693
|
+
calculating_est = round(
|
694
|
+
(uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
|
695
|
+
|
696
|
+
if calculating_est < 60:
|
697
|
+
print('\rest......(sec):', calculating_est, '\n', end="")
|
698
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
699
|
+
|
700
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
701
|
+
print('\rest......(min):', calculating_est/60, '\n', end="")
|
702
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
703
|
+
|
704
|
+
elif calculating_est > 3600:
|
705
|
+
print('\rest......(h):', calculating_est/3600, '\n', end="")
|
706
|
+
print('\rTest accuracy: ', acc, "\n", end="")
|
707
|
+
if show_metrices == True:
|
708
|
+
plot_evaluate(y_test, y_preds, acc_list)
|
689
709
|
|
690
|
-
|
710
|
+
EndTime = time.time()
|
711
|
+
for i, w in enumerate(Wc):
|
712
|
+
W[i] = np.copy(w)
|
713
|
+
|
714
|
+
calculating_est = round(EndTime - start_time, 2)
|
715
|
+
|
716
|
+
print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
|
717
|
+
|
718
|
+
if calculating_est < 60:
|
719
|
+
print('Total testing time(sec): ', calculating_est)
|
720
|
+
|
721
|
+
elif calculating_est > 60 and calculating_est < 3600:
|
722
|
+
print('Total testing time(min): ', calculating_est/60)
|
723
|
+
|
724
|
+
elif calculating_est > 3600:
|
725
|
+
print('Total testing time(h): ', calculating_est/3600)
|
726
|
+
|
727
|
+
if acc >= 0.8:
|
728
|
+
print(Fore.GREEN + '\nTotal Test accuracy: ',
|
729
|
+
acc, '\n' + Style.RESET_ALL)
|
730
|
+
|
731
|
+
elif acc < 0.8 and acc > 0.6:
|
732
|
+
print(Fore.MAGENTA + '\nTotal Test accuracy: ',
|
733
|
+
acc, '\n' + Style.RESET_ALL)
|
734
|
+
|
735
|
+
elif acc <= 0.6:
|
736
|
+
print(Fore.RED + '\nTotal Test accuracy: ',
|
737
|
+
acc, '\n' + Style.RESET_ALL)
|
738
|
+
|
739
|
+
except:
|
740
|
+
|
741
|
+
print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
|
691
742
|
return 'e'
|
692
|
-
|
693
|
-
return W,
|
694
|
-
|
695
|
-
|
696
|
-
|
697
|
-
|
698
|
-
|
699
|
-
|
700
|
-
|
701
|
-
|
702
|
-
|
703
|
-
|
704
|
-
|
705
|
-
|
706
|
-
|
707
|
-
|
708
|
-
|
709
|
-
|
710
|
-
|
711
|
-
infoSavePLAN = """
|
712
|
-
Function to save a deep learning model.
|
743
|
+
|
744
|
+
return W, y_preds, acc
|
745
|
+
|
746
|
+
|
747
|
+
def save_model(model_name,
|
748
|
+
model_type,
|
749
|
+
class_count,
|
750
|
+
test_acc,
|
751
|
+
weights_type,
|
752
|
+
weights_format,
|
753
|
+
model_path,
|
754
|
+
scaler_params,
|
755
|
+
W,
|
756
|
+
activation_potential=None
|
757
|
+
):
|
758
|
+
|
759
|
+
infosave_model = """
|
760
|
+
Function to save a pruning learning model.
|
713
761
|
|
714
762
|
Arguments:
|
715
|
-
|
716
|
-
|
717
|
-
|
718
|
-
|
719
|
-
|
720
|
-
MembranPotentials (list): List containing MEMBRAN POTENTIALS.
|
721
|
-
DoNormalization (str): is that normalized data ? 'y' or 'n'.
|
722
|
-
Activations (list): List containing activation functions for each layer.
|
723
|
-
TestAcc (float): Test accuracy of the model.
|
724
|
-
LogType (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
|
725
|
-
WeightsType (str): Type of weights to save (options: 'txt', 'npy', 'mat').
|
763
|
+
model_name (str): Name of the model.
|
764
|
+
model_type (str): Type of the model.(options: PLAN)
|
765
|
+
class_count (int): Number of classes.
|
766
|
+
test_acc (float): Test accuracy of the model.
|
767
|
+
weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
|
726
768
|
WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
|
727
|
-
|
769
|
+
model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
|
770
|
+
scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
|
728
771
|
W: Weights of the model.
|
772
|
+
activation_potential (float or None): Threshold value for comparison. (optional)
|
729
773
|
|
730
774
|
Returns:
|
731
775
|
str: Message indicating if the model was saved successfully or encountered an error.
|
732
776
|
"""
|
733
|
-
|
777
|
+
|
734
778
|
# Operations to be performed by the function will be written here
|
735
779
|
pass
|
736
780
|
|
737
|
-
|
738
|
-
|
739
|
-
|
740
|
-
|
741
|
-
|
742
|
-
print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
|
781
|
+
layers = ['fex', 'cat']
|
782
|
+
|
783
|
+
if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
|
784
|
+
print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
|
785
|
+
infosave_model + Style.RESET_ALL)
|
743
786
|
return 'e'
|
744
|
-
|
745
|
-
if
|
746
|
-
print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from:
|
787
|
+
|
788
|
+
if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
|
789
|
+
print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
|
790
|
+
infosave_model + Style.RESET_ALL)
|
747
791
|
return 'e'
|
748
|
-
|
792
|
+
|
749
793
|
NeuronCount = 0
|
750
794
|
SynapseCount = 0
|
795
|
+
|
751
796
|
try:
|
752
797
|
for w in W:
|
753
798
|
NeuronCount += np.shape(w)[0]
|
754
799
|
SynapseCount += np.shape(w)[0] * np.shape(w)[1]
|
755
800
|
except:
|
756
|
-
|
757
|
-
print(Fore.RED + "ERROR: Weight matrices has a problem from:
|
801
|
+
|
802
|
+
print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
|
803
|
+
infosave_model + Style.RESET_ALL)
|
758
804
|
return 'e'
|
759
805
|
import pandas as pd
|
760
806
|
from datetime import datetime
|
761
807
|
from scipy import io
|
762
|
-
|
763
|
-
data = {'MODEL NAME':
|
764
|
-
'MODEL TYPE':
|
765
|
-
'LAYERS':
|
766
|
-
'LAYER COUNT': len(
|
767
|
-
'CLASS COUNT':
|
768
|
-
'MEMBRAN THRESHOLDS': MembranThresholds,
|
769
|
-
'MEMBRAN POTENTIALS': MembranPotentials,
|
770
|
-
'NORMALIZATION': Normalizations,
|
771
|
-
'ACTIVATIONS': Activations,
|
808
|
+
|
809
|
+
data = {'MODEL NAME': model_name,
|
810
|
+
'MODEL TYPE': model_type,
|
811
|
+
'LAYERS': layers,
|
812
|
+
'LAYER COUNT': len(layers),
|
813
|
+
'CLASS COUNT': class_count,
|
772
814
|
'NEURON COUNT': NeuronCount,
|
773
815
|
'SYNAPSE COUNT': SynapseCount,
|
774
|
-
'TEST ACCURACY':
|
816
|
+
'TEST ACCURACY': test_acc,
|
775
817
|
'SAVE DATE': datetime.now(),
|
776
|
-
'WEIGHTS TYPE':
|
777
|
-
'WEIGHTS FORMAT':
|
778
|
-
'MODEL PATH':
|
818
|
+
'WEIGHTS TYPE': weights_type,
|
819
|
+
'WEIGHTS FORMAT': weights_format,
|
820
|
+
'MODEL PATH': model_path,
|
821
|
+
'STANDARD SCALER': scaler_params,
|
822
|
+
'ACTIVATION POTENTIAL': activation_potential
|
779
823
|
}
|
780
824
|
try:
|
781
|
-
|
825
|
+
|
782
826
|
df = pd.DataFrame(data)
|
783
|
-
|
784
|
-
|
785
|
-
|
786
|
-
df.to_csv(ModelPath + ModelName + '.csv', sep='\t', index=False)
|
787
|
-
|
788
|
-
elif LogType == 'txt':
|
789
|
-
|
790
|
-
df.to_csv(ModelPath + ModelName + '.txt', sep='\t', index=False)
|
791
|
-
|
792
|
-
elif LogType == 'hdf5':
|
793
|
-
|
794
|
-
df.to_hdf(ModelPath + ModelName + '.h5', key='data', mode='w')
|
795
|
-
|
827
|
+
|
828
|
+
df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
|
829
|
+
|
796
830
|
except:
|
797
|
-
|
798
|
-
print(Fore.RED + "ERROR: Model log not saved probably
|
831
|
+
|
832
|
+
print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
|
833
|
+
infosave_model + Style.RESET_ALL)
|
799
834
|
return 'e'
|
800
835
|
try:
|
801
|
-
|
802
|
-
if
|
803
|
-
|
836
|
+
|
837
|
+
if weights_type == 'txt' and weights_format == 'd':
|
838
|
+
|
804
839
|
for i, w in enumerate(W):
|
805
|
-
np.savetxt(
|
806
|
-
|
807
|
-
|
808
|
-
|
840
|
+
np.savetxt(model_path + model_name +
|
841
|
+
str(i+1) + 'w.txt', w, fmt='%d')
|
842
|
+
|
843
|
+
if weights_type == 'txt' and weights_format == 'f':
|
844
|
+
|
809
845
|
for i, w in enumerate(W):
|
810
|
-
|
811
|
-
|
812
|
-
|
813
|
-
|
846
|
+
np.savetxt(model_path + model_name +
|
847
|
+
str(i+1) + 'w.txt', w, fmt='%f')
|
848
|
+
|
849
|
+
if weights_type == 'txt' and weights_format == 'raw':
|
850
|
+
|
814
851
|
for i, w in enumerate(W):
|
815
|
-
np.savetxt(
|
816
|
-
|
817
|
-
|
852
|
+
np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
|
853
|
+
|
818
854
|
###
|
819
|
-
|
820
|
-
|
821
|
-
|
822
|
-
|
855
|
+
|
856
|
+
if weights_type == 'npy' and weights_format == 'd':
|
857
|
+
|
823
858
|
for i, w in enumerate(W):
|
824
|
-
np.save(
|
825
|
-
|
826
|
-
|
827
|
-
|
859
|
+
np.save(model_path + model_name +
|
860
|
+
str(i+1) + 'w.npy', w.astype(int))
|
861
|
+
|
862
|
+
if weights_type == 'npy' and weights_format == 'f':
|
863
|
+
|
828
864
|
for i, w in enumerate(W):
|
829
|
-
|
830
|
-
|
831
|
-
|
832
|
-
|
865
|
+
np.save(model_path + model_name + str(i+1) +
|
866
|
+
'w.npy', w, w.astype(float))
|
867
|
+
|
868
|
+
if weights_type == 'npy' and weights_format == 'raw':
|
869
|
+
|
833
870
|
for i, w in enumerate(W):
|
834
|
-
np.save(
|
835
|
-
|
836
|
-
|
871
|
+
np.save(model_path + model_name + str(i+1) + 'w.npy', w)
|
872
|
+
|
837
873
|
###
|
838
|
-
|
839
|
-
|
840
|
-
|
841
|
-
|
874
|
+
|
875
|
+
if weights_type == 'mat' and weights_format == 'd':
|
876
|
+
|
842
877
|
for i, w in enumerate(W):
|
843
878
|
w = {'w': w.astype(int)}
|
844
|
-
io.savemat(
|
845
|
-
|
846
|
-
if
|
847
|
-
|
879
|
+
io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
|
880
|
+
|
881
|
+
if weights_type == 'mat' and weights_format == 'f':
|
882
|
+
|
848
883
|
for i, w in enumerate(W):
|
849
884
|
w = {'w': w.astype(float)}
|
850
|
-
io.savemat(
|
851
|
-
|
852
|
-
if
|
853
|
-
|
885
|
+
io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
|
886
|
+
|
887
|
+
if weights_type == 'mat' and weights_format == 'raw':
|
888
|
+
|
854
889
|
for i, w in enumerate(W):
|
855
890
|
w = {'w': w}
|
856
|
-
io.savemat(
|
857
|
-
|
891
|
+
io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
|
892
|
+
|
858
893
|
except:
|
859
|
-
|
860
|
-
print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from:
|
894
|
+
|
895
|
+
print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
|
861
896
|
return 'e'
|
862
897
|
print(df)
|
863
898
|
message = (
|
864
899
|
Fore.GREEN + "Model Saved Successfully\n" +
|
865
|
-
Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
|
900
|
+
Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
|
866
901
|
Style.RESET_ALL
|
867
|
-
|
868
|
-
|
902
|
+
)
|
903
|
+
|
869
904
|
return print(message)
|
870
905
|
|
871
906
|
|
872
|
-
def
|
873
|
-
|
874
|
-
|
875
|
-
|
876
|
-
|
877
|
-
Function to load a deep learning model.
|
907
|
+
def load_model(model_name,
|
908
|
+
model_path,
|
909
|
+
):
|
910
|
+
infoload_model = """
|
911
|
+
Function to load a pruning learning model.
|
878
912
|
|
879
913
|
Arguments:
|
880
|
-
|
881
|
-
|
882
|
-
LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
|
914
|
+
model_name (str): Name of the model.
|
915
|
+
model_path (str): Path where the model is saved.
|
883
916
|
|
884
917
|
Returns:
|
885
|
-
lists: W(list[num]),
|
918
|
+
lists: W(list[num]), activation_potential, DataFrame of the model
|
886
919
|
"""
|
887
|
-
|
920
|
+
pass
|
888
921
|
|
922
|
+
import scipy.io as sio
|
923
|
+
|
924
|
+
try:
|
925
|
+
|
926
|
+
df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
|
927
|
+
|
928
|
+
except:
|
929
|
+
|
930
|
+
print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
|
931
|
+
infoload_model + Style.RESET_ALL)
|
932
|
+
|
933
|
+
model_name = str(df['MODEL NAME'].iloc[0])
|
934
|
+
layer_count = int(df['LAYER COUNT'].iloc[0])
|
935
|
+
WeightType = str(df['WEIGHTS TYPE'].iloc[0])
|
936
|
+
|
937
|
+
W = [0] * layer_count
|
938
|
+
|
939
|
+
if WeightType == 'txt':
|
940
|
+
for i in range(layer_count):
|
941
|
+
W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
|
942
|
+
elif WeightType == 'npy':
|
943
|
+
for i in range(layer_count):
|
944
|
+
W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
|
945
|
+
elif WeightType == 'mat':
|
946
|
+
for i in range(layer_count):
|
947
|
+
W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
|
948
|
+
else:
|
949
|
+
raise ValueError(
|
950
|
+
Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
|
951
|
+
print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
|
952
|
+
return W, df
|
953
|
+
|
954
|
+
|
955
|
+
def predict_model_ssd(Input, model_name, model_path):
|
956
|
+
|
957
|
+
infopredict_model_ssd = """
|
958
|
+
Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
|
959
|
+
|
960
|
+
Arguments:
|
961
|
+
Input (list or ndarray): Input data for the model (single vector or single matrix).
|
962
|
+
model_name (str): Name of the model.
|
963
|
+
Returns:
|
964
|
+
ndarray: Output from the model.
|
965
|
+
"""
|
966
|
+
W, df = load_model(model_name, model_path)
|
889
967
|
|
890
|
-
|
891
|
-
|
892
|
-
|
893
|
-
|
894
|
-
|
895
|
-
if LogType == 'csv':
|
896
|
-
df = pd.read_csv(ModelPath + ModelName + '.' + LogType)
|
968
|
+
activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
|
969
|
+
|
970
|
+
if activation_potential != 'nan':
|
971
|
+
|
972
|
+
activation_potential = float(activation_potential)
|
897
973
|
|
974
|
+
else:
|
975
|
+
|
976
|
+
activation_potential = None
|
898
977
|
|
899
|
-
|
900
|
-
|
978
|
+
try:
|
979
|
+
|
980
|
+
scaler_params = df['STANDARD SCALER'].tolist()
|
981
|
+
|
982
|
+
|
983
|
+
scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
|
984
|
+
|
985
|
+
Input = standard_scaler(None, Input, scaler_params)
|
901
986
|
|
987
|
+
except:
|
902
988
|
|
903
|
-
|
904
|
-
|
905
|
-
except:
|
906
|
-
print(Fore.RED + "ERROR: Model Path error. Accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)
|
907
|
-
|
908
|
-
ModelName = str(df['MODEL NAME'].iloc[0])
|
909
|
-
Layers = df['LAYERS'].tolist()
|
910
|
-
LayerCount = int(df['LAYER COUNT'].iloc[0])
|
911
|
-
ClassCount = int(df['CLASS COUNT'].iloc[0])
|
912
|
-
MembranThresholds = df['MEMBRAN THRESHOLDS'].tolist()
|
913
|
-
MembranPotentials = df['MEMBRAN POTENTIALS'].tolist()
|
914
|
-
Normalizations = df['NORMALIZATION'].tolist()
|
915
|
-
Activations = df['ACTIVATIONS'].tolist()
|
916
|
-
NeuronCount = int(df['NEURON COUNT'].iloc[0])
|
917
|
-
SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
|
918
|
-
TestAcc = int(df['TEST ACCURACY'].iloc[0])
|
919
|
-
ModelType = str(df['MODEL TYPE'].iloc[0])
|
920
|
-
WeightType = str(df['WEIGHTS TYPE'].iloc[0])
|
921
|
-
WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
|
922
|
-
ModelPath = str(df['MODEL PATH'].iloc[0])
|
923
|
-
|
924
|
-
W = [0] * LayerCount
|
925
|
-
|
926
|
-
if WeightType == 'txt':
|
927
|
-
for i in range(LayerCount):
|
928
|
-
W[i] = np.loadtxt(ModelPath + ModelName + str(i+1) + 'w.txt')
|
929
|
-
elif WeightType == 'npy':
|
930
|
-
for i in range(LayerCount):
|
931
|
-
W[i] = np.load(ModelPath + ModelName + str(i+1) + 'w.npy')
|
932
|
-
elif WeightType == 'mat':
|
933
|
-
for i in range(LayerCount):
|
934
|
-
W[i] = sio.loadmat(ModelPath + ModelName + str(i+1) + 'w.mat')
|
935
|
-
else:
|
936
|
-
raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
|
937
|
-
print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
|
938
|
-
return W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations,df
|
939
|
-
|
940
|
-
def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
|
941
|
-
infoPredictFromDİscPLAN = """
|
942
|
-
Function to make a prediction using a divided pruning deep learning neural network (PLAN).
|
989
|
+
non_scaled = True
|
990
|
+
|
943
991
|
|
944
|
-
|
945
|
-
Input (list or ndarray): Input data for the model (single vector or single matrix).
|
946
|
-
ModelName (str): Name of the model.
|
947
|
-
ModelPath (str): Path where the model is saved.
|
948
|
-
LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
|
992
|
+
layers = ['fex', 'cat']
|
949
993
|
|
950
|
-
Returns:
|
951
|
-
ndarray: Output from the model.
|
952
|
-
"""
|
953
|
-
W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations = LoadPLAN(ModelName,ModelPath,
|
954
|
-
LogType)[0:6]
|
955
994
|
Wc = [0] * len(W)
|
956
995
|
for i, w in enumerate(W):
|
957
996
|
Wc[i] = np.copy(w)
|
958
997
|
try:
|
959
|
-
|
960
|
-
|
961
|
-
|
962
|
-
for index, Layer in enumerate(
|
963
|
-
|
964
|
-
|
965
|
-
|
966
|
-
|
967
|
-
|
968
|
-
|
969
|
-
|
970
|
-
NeuralLayer = Softmax(NeuralLayer)
|
971
|
-
|
972
|
-
if Layers[index] == 'fex':
|
973
|
-
NeuralLayer,useless = Fex(NeuralLayer, W[index],
|
974
|
-
MembranThresholds[index],
|
975
|
-
MembranPotentials[index])
|
976
|
-
if Layers[index] == 'cat':
|
977
|
-
NeuralLayer,useless = Cat(NeuralLayer, W[index],
|
978
|
-
MembranThresholds[index],
|
979
|
-
MembranPotentials[index],
|
980
|
-
0)
|
998
|
+
neural_layer = Input
|
999
|
+
neural_layer = np.array(neural_layer)
|
1000
|
+
neural_layer = neural_layer.ravel()
|
1001
|
+
for index, Layer in enumerate(layers):
|
1002
|
+
|
1003
|
+
neural_layer = normalization(neural_layer)
|
1004
|
+
|
1005
|
+
if Layer == 'fex':
|
1006
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
|
1007
|
+
elif Layer == 'cat':
|
1008
|
+
neural_layer = np.dot(W[index], neural_layer)
|
981
1009
|
except:
|
982
|
-
|
983
|
-
|
1010
|
+
print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
|
1011
|
+
infopredict_model_ssd + Style.RESET_ALL)
|
1012
|
+
return 'e'
|
984
1013
|
for i, w in enumerate(Wc):
|
985
1014
|
W[i] = np.copy(w)
|
986
|
-
return
|
1015
|
+
return neural_layer
|
1016
|
+
|
987
1017
|
|
+def predict_model_ram(Input, scaler_params, W, activation_potential=None):

-
-
-    Function to make a prediction using a pruning learning artificial neural network (PLAN)
+    infopredict_model_ram = """
+    Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
     from weights and parameters stored in memory.

     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
-
-    MembranThresholds (list): MEMBRAN THRESHOLDS.
-    MembranPotentials (list): MEMBRAN POTENTIALS.
-    DoNormalization (str): Whether to normalize ('y' or 'n').
-    Activations (list): Activation functions for each layer.
+    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W (list of ndarrays): Weights of the model.
+    activation_potential (float or None): Threshold value for comparison. (optional)

     Returns:
     ndarray: Output from the model.
     """

+    if scaler_params != None:
+
+        Input = standard_scaler(None, Input, scaler_params)
+
+    layers = ['fex', 'cat']
+
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
     try:
-
-
-
-        for index, Layer in enumerate(
-
-
-
-
-
-
-
-
-
-        if Layers[index] == 'fex':
-            NeuralLayer, useless = Fex(NeuralLayer, W[index],
-                                       MembranThresholds[index],
-                                       MembranPotentials[index])
-        if Layers[index] == 'cat':
-            NeuralLayer, useless = Cat(NeuralLayer, W[index],
-                                       MembranThresholds[index],
-                                       MembranPotentials[index], 0)
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
+
+            neural_layer = normalization(neural_layer)
+
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+            elif Layer == 'cat':
+                neural_layer = np.dot(W[index], neural_layer)
+
     except:
-        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from:
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
+              infopredict_model_ram + Style.RESET_ALL)
         return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
-    return
-
+    return neural_layer
+
+
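predict_model_ram exposes the same fex/cat pipeline for weights already held in memory, optionally standard-scaling the input first. A hedged usage sketch, assuming the module is importable as `from plan import plan` (the wheel ships plan/plan.py); the weight shapes below are hypothetical placeholders, since real weights come from a prior fit(...) call:

    import numpy as np
    from plan import plan

    # Hypothetical shapes; in practice W is the weight list returned by fit().
    W = [np.random.rand(4, 4), np.random.rand(3, 4)]
    sample = np.random.rand(4)

    out = plan.predict_model_ram(Input=sample, scaler_params=None, W=W)
    # On success `out` holds the final layer's activations; on a bad input the
    # function prints an error and returns 'e'.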
+def auto_balancer(x_train, y_train):

-
-    infoAutoBalancer = """
+    infoauto_balancer = """
     Function to balance the training data across different classes.

     Arguments:
-
-
-    ClassCount (int): Number of classes.
+    x_train (list): Input data for training.
+    y_train (list): Labels corresponding to the input data.

     Returns:
     tuple: A tuple containing balanced input data and labels.
     """
-
-
-
-
-
-
-
-
-
-
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
+            0] for i in range(class_count)}
+        classes = [len(ClassIndices[i]) for i in range(class_count)]
+
+        if len(set(classes)) == 1:
+            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+            return x_train, y_train
+
+        MinCount = min(classes)
+
         BalancedIndices = []
-        for i in range(
+        for i in range(class_count):
             if len(ClassIndices[i]) > MinCount:
-                SelectedIndices = np.random.choice(
+                SelectedIndices = np.random.choice(
+                    ClassIndices[i], MinCount, replace=False)
             else:
                 SelectedIndices = ClassIndices[i]
             BalancedIndices.extend(SelectedIndices)
+
+        BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+        BalancedLabels = [y_train[idx] for idx in BalancedIndices]
+
+        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
+              ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+    except:
+        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
+        return 'e'
+
+    return BalancedInputs, BalancedLabels
+
+
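auto_balancer undersamples: every class is trimmed to the size of the rarest class by random selection without replacement. A small sketch, same import assumption as above:

    import numpy as np
    from plan import plan

    x = np.random.rand(6, 2)
    y = np.array([[1, 0]] * 4 + [[0, 1]] * 2)   # class 0 has 4 samples, class 1 has 2

    x_bal, y_bal = plan.auto_balancer(x, y)      # both classes trimmed to 2 samples
    print(len(x_bal), len(y_bal))                # 4 4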
+def synthetic_augmentation(x_train, y_train):
+    """
+    Generates synthetic examples to balance classes with fewer examples.
+
+    Arguments:
+    x -- Input dataset (examples) - list format
+    y -- Class labels (one-hot encoded) - list format
+
+    Returns:
+    x_balanced -- Balanced input dataset (list format)
+    y_balanced -- Balanced class labels (one-hot encoded, list format)
+    """
+    x = x_train
+    y = y_train
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    # Calculate class distribution
+    class_distribution = {i: 0 for i in range(class_count)}
+    for label in y:
+        class_distribution[np.argmax(label)] += 1
+
+    max_class_count = max(class_distribution.values())
+
+    x_balanced = list(x)
+    y_balanced = list(y)
+
+    for class_label in range(class_count):
+        class_indices = [i for i, label in enumerate(
+            y) if np.argmax(label) == class_label]
+        num_samples = len(class_indices)
+
+        if num_samples < max_class_count:
+            while num_samples < max_class_count:
+
+                random_indices = np.random.choice(
+                    class_indices, 2, replace=False)
+                sample1 = x[random_indices[0]]
+                sample2 = x[random_indices[1]]
+
+                synthetic_sample = sample1 + \
+                    (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
+                x_balanced.append(synthetic_sample.tolist())
+                y_balanced.append(y[class_indices[0]])
+
+                num_samples += 1
+
+    return np.array(x_balanced), np.array(y_balanced)
+
+
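synthetic_augmentation oversamples instead: minority classes are grown to the majority count with examples interpolated between two randomly chosen same-class samples, sample1 + (sample2 - sample1) * r for r in [0, 1). Sketch:

    import numpy as np
    from plan import plan

    x = np.random.rand(6, 2)
    y = np.array([[1, 0]] * 4 + [[0, 1]] * 2)

    x_bal, y_bal = plan.synthetic_augmentation(x, y)
    print(x_bal.shape, y_bal.shape)              # (8, 2) (8, 2)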
+def standard_scaler(x_train, x_test, scaler_params=None):
+    info_standard_scaler = """
+    Standardizes training and test datasets. x_test may be None.
+
+    Args:
+        train_data: numpy.ndarray
+            Training data
+        test_data: numpy.ndarray
+            Test data
+
+    Returns:
+        list:
+            Scaler parameters: mean and std
+        tuple
+            Standardized training and test datasets
+    """
+    try:
+
+        if scaler_params == None and x_test != None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+
+            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled, test_data_scaled
+
+        if scaler_params == None and x_test == None:
+
+            mean = np.mean(x_train, axis=0)
+            std = np.std(x_train, axis=0)
+            train_data_scaled = (x_train - mean) / std
+
+            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+
+            scaler_params = [mean, std]
+
+            return scaler_params, train_data_scaled
+
+        if scaler_params != None:
+
+            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            return test_data_scaled
+
+    except:
+        print(
+            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+
+
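standard_scaler covers three calling modes: fit-and-transform train plus test, fit-and-transform train alone, or transform new data with previously saved [mean, std]; np.nan_to_num guards zero-variance features. Per its own error message it expects list inputs. Sketch:

    from plan import plan

    x_train = [[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]]
    x_test = [[2.0, 25.0]]

    scaler_params, x_train_s, x_test_s = plan.standard_scaler(x_train, x_test)
    x_new_s = plan.standard_scaler(None, [[1.5, 15.0]], scaler_params)   # reuse the fitted mean/std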
+def encode_one_hot(y_train, y_test):
+    info_one_hot_encode = """
+    Performs one-hot encoding on y_train and y_test data.
+
+    Args:
+        y_train (numpy.ndarray): Training label data.
+        y_test (numpy.ndarray): Test label data.
+
+    Returns:
+        tuple: One-hot encoded y_train and y_test data.
+    """
+    try:
+        classes = np.unique(y_train)
+        class_count = len(classes)
+
+        class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+        y_train_encoded = np.zeros((y_train.shape[0], class_count))
+        for i, label in enumerate(y_train):
+            y_train_encoded[i, class_to_index[label]] = 1
+
+        y_test_encoded = np.zeros((y_test.shape[0], class_count))
+        for i, label in enumerate(y_test):
+            y_test_encoded[i, class_to_index[label]] = 1
+    except:
+        print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+
+    return y_train_encoded, y_test_encoded
+
+
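encode_one_hot derives the class set from y_train alone, so a y_test label unseen in training would raise inside the try block. Sketch:

    import numpy as np
    from plan import plan

    y_train = np.array([0, 2, 1, 0])
    y_test = np.array([1, 2])

    y_train_oh, y_test_oh = plan.encode_one_hot(y_train, y_test)
    print(y_train_oh)   # rows like [1. 0. 0.], one column per class found in y_train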
+def split(X, y, test_size, random_state):
+    """
+    Splits the given X (features) and y (labels) data into training and testing subsets.
+
+    Args:
+        X (numpy.ndarray): Features data.
+        y (numpy.ndarray): Labels data.
+        test_size (float or int): Proportion or number of samples for the test subset.
+        random_state (int or None): Seed for random state.
+
+    Returns:
+        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
+    """
+    # Size of the dataset
+    num_samples = X.shape[0]
+
+    if isinstance(test_size, float):
+        test_size = int(test_size * num_samples)
+    elif isinstance(test_size, int):
+        if test_size > num_samples:
+            raise ValueError(
+                "test_size cannot be larger than the number of samples.")
+    else:
+        raise ValueError("test_size should be float or int.")
+
+    if random_state is not None:
+        np.random.seed(random_state)
+
+    indices = np.arange(num_samples)
+    np.random.shuffle(indices)
+
+    test_indices = indices[:test_size]
+    train_indices = indices[test_size:]
+
+    x_train, x_test = X[train_indices], X[test_indices]
+    y_train, y_test = y[train_indices], y[test_indices]
+
+    return x_train, x_test, y_train, y_test
+
+
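split accepts test_size either as a fraction (float) or an absolute sample count (int), and seeds NumPy's generator when random_state is given. Sketch:

    import numpy as np
    from plan import plan

    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 1] * 5)

    x_train, x_test, y_train, y_test = plan.split(X, y, test_size=0.3, random_state=42)
    print(x_train.shape, x_test.shape)   # (7, 2) (3, 2)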
+def metrics(y_ts, test_preds, average='weighted'):
+    """
+    Calculates precision, recall and F1 score for a classification task.
+
+    Args:
+        y_ts (list or numpy.ndarray): True labels.
+        test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
+
+    Returns:
+        tuple: Precision, recall, F1 score.
+    """
+    y_test_d = decode_one_hot(y_ts)
+    y_test_d = np.array(y_test_d)
+    y_pred = np.array(test_preds)
+
+    if y_test_d.ndim > 1:
+        y_test_d = y_test_d.reshape(-1)
+    if y_pred.ndim > 1:
+        y_pred = y_pred.reshape(-1)
+
+    tp = {}
+    fp = {}
+    fn = {}
+
+    classes = np.unique(np.concatenate((y_test_d, y_pred)))
+
+    for c in classes:
+        tp[c] = 0
+        fp[c] = 0
+        fn[c] = 0
+
+    for c in classes:
+        for true, pred in zip(y_test_d, y_pred):
+            if true == c and pred == c:
+                tp[c] += 1
+            elif true != c and pred == c:
+                fp[c] += 1
+            elif true == c and pred != c:
+                fn[c] += 1
+
+    precision = {}
+    recall = {}
+    f1 = {}
+
+    for c in classes:
+        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
+
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+    return precision_val, recall_val, f1_val
+
+
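Note the asymmetry in metrics: y_ts is expected one-hot (it is run through decode_one_hot) while test_preds are plain integer labels. Sketch:

    import numpy as np
    from plan import plan

    y_test = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])   # one-hot ground truth
    preds = np.array([0, 1, 1, 1])                        # plain label predictions

    precision, recall, f1 = plan.metrics(y_test, preds, average='weighted')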
+def decode_one_hot(encoded_data):
+    """
+    Decodes one-hot encoded data to original categorical labels.
+
+    Args:
+        encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
+
+    Returns:
+        numpy.ndarray: Decoded categorical labels with shape (n_samples,).
+    """
+
+    decoded_labels = np.argmax(encoded_data, axis=1)
+
+    return decoded_labels
+
+
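decode_one_hot is the inverse helper, a row-wise argmax:

    import numpy as np
    from plan import plan

    print(plan.decode_one_hot(np.array([[0, 1, 0], [1, 0, 0]])))   # -> [1 0]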
+def roc_curve(y_true, y_score):
+    """
+    Compute Receiver Operating Characteristic (ROC) curve.
+
+    Parameters:
+    y_true : array, shape = [n_samples]
+        True binary labels in range {0, 1} or {-1, 1}.
+    y_score : array, shape = [n_samples]
+        Target scores, can either be probability estimates of the positive class,
+        confidence values, or non-thresholded measure of decisions (as returned
+        by decision_function on some classifiers).
+
+    Returns:
+    fpr : array, shape = [n]
+        Increasing false positive rates such that element i is the false positive rate
+        of predictions with score >= thresholds[i].
+    tpr : array, shape = [n]
+        Increasing true positive rates such that element i is the true positive rate
+        of predictions with score >= thresholds[i].
+    thresholds : array, shape = [n]
+        Decreasing thresholds on the decision function used to compute fpr and tpr.
+    """
+
+    y_true = np.asarray(y_true)
+    y_score = np.asarray(y_score)
+
+    if len(np.unique(y_true)) != 2:
+        raise ValueError("Only binary classification is supported.")
+
+    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
+    y_score = y_score[desc_score_indices]
+    y_true = y_true[desc_score_indices]
+
+    fpr = []
+    tpr = []
+    thresholds = []
+    n_pos = np.sum(y_true)
+    n_neg = len(y_true) - n_pos
+
+    tp = 0
+    fp = 0
+    prev_score = None
+
+    for i, score in enumerate(y_score):
+        if score != prev_score:
+            fpr.append(fp / n_neg)
+            tpr.append(tp / n_pos)
+            thresholds.append(score)
+            prev_score = score
+
+        if y_true[i] == 1:
+            tp += 1
+        else:
+            fp += 1
+
+    fpr.append(fp / n_neg)
+    tpr.append(tp / n_pos)
+    thresholds.append(score)
+
+    return np.array(fpr), np.array(tpr), np.array(thresholds)
+
+
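roc_curve sorts scores descending and sweeps thresholds, emitting one (fpr, tpr) point per distinct score; the area can then be taken with np.trapz, as plot_evaluate does further down. Binary labels only. Sketch:

    import numpy as np
    from plan import plan

    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])

    fpr, tpr, thresholds = plan.roc_curve(y_true, y_score)
    auc = np.trapz(tpr, fpr)   # area under the curve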
+def confusion_matrix(y_true, y_pred, class_count):
+    """
+    Computes confusion matrix.
+
+    Args:
+        y_true (numpy.ndarray): True class labels (1D array).
+        y_pred (numpy.ndarray): Predicted class labels (1D array).
+        num_classes (int): Number of classes.
+
+    Returns:
+        numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
+    """
+    confusion = np.zeros((class_count, class_count), dtype=int)
+
+    for i in range(len(y_true)):
+        true_label = y_true[i]
+        pred_label = y_pred[i]
+        confusion[true_label, pred_label] += 1
+
+    return confusion
+
+
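confusion_matrix assumes labels are already integers in range(class_count), since they index the matrix directly. Sketch:

    import numpy as np
    from plan import plan

    y_true = np.array([0, 1, 2, 1])
    y_pred = np.array([0, 2, 2, 1])

    print(plan.confusion_matrix(y_true, y_pred, class_count=3))
    # [[1 0 0]
    #  [0 1 1]
    #  [0 0 1]]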
+def plot_evaluate(y_test, y_preds, acc_list):
+
+    acc = acc_list[len(acc_list) - 1]
+    y_true = decode_one_hot(y_test)
+
+    y_true = np.array(y_true)
+    y_preds = np.array(y_preds)
+    Class = np.unique(decode_one_hot(y_test))
+
+    precision, recall, f1 = metrics(y_test, y_preds)
+
+
+    # Confusion matrix
+    cm = confusion_matrix(y_true, y_preds, len(Class))
+    # Arrange the panels in a subplot grid
+    fig, axs = plt.subplots(2, 2, figsize=(16, 12))
+
+    # Confusion Matrix
+    sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
+    axs[0, 0].set_title("Confusion Matrix")
+    axs[0, 0].set_xlabel("Predicted Class")
+    axs[0, 0].set_ylabel("Actual Class")
+
+    if len(Class) == 2:
+        fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+        # ROC Curve
+        roc_auc = np.trapz(tpr, fpr)
+        axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+        axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+        axs[1, 0].set_xlim([0.0, 1.0])
+        axs[1, 0].set_ylim([0.0, 1.05])
+        axs[1, 0].set_xlabel('False Positive Rate')
+        axs[1, 0].set_ylabel('True Positive Rate')
+        axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+        axs[1, 0].legend(loc="lower right")
+        axs[1, 0].legend(loc="lower right")
+    else:
+
+        for i in range(len(Class)):
+
+            y_true_copy = np.copy(y_true)
+            y_preds_copy = np.copy(y_preds)

-
-
+            y_true_copy[y_true_copy == i] = 0
+            y_true_copy[y_true_copy != 0] = 1
+
+            y_preds_copy[y_preds_copy == i] = 0
+            y_preds_copy[y_preds_copy != 0] = 1
+
+
+            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
+
+            roc_auc = np.trapz(tpr, fpr)
+            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            axs[1, 0].set_xlim([0.0, 1.0])
+            axs[1, 0].set_ylim([0.0, 1.05])
+            axs[1, 0].set_xlabel('False Positive Rate')
+            axs[1, 0].set_ylabel('True Positive Rate')
+            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+            axs[1, 0].legend(loc="lower right")
+            axs[1, 0].legend(loc="lower right")

-    print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(TrainInputs)) + " to: " + str(len(BalancedInputs)) + ". from: AutoBalancer " + Style.RESET_ALL)
-    except:
-        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoAutoBalancer)
-        return 'e'

-
-
-
-def GetWeights():
+    """
+    accuracy_per_class = []

-
-
-
+    for cls in Class:
+        correct = np.sum((y_true == cls) & (y_preds == cls))
+        total = np.sum(y_true == cls)
+        accuracy_cls = correct / total if total > 0 else 0.0
+        accuracy_per_class.append(accuracy_cls)

-
+    axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
+    axs[2, 0].set_xlabel('Class')
+    axs[2, 0].set_ylabel('Accuracy')
+    axs[2, 0].set_title('Class-wise Accuracy')
+    axs[2, 0].set_xticks(Class)
+    axs[2, 0].grid(True)
+    """
+
+
+
+
+    # Precision, Recall, F1 Score, Accuracy
+    metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
+    values = [precision, recall, f1, acc]
+    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
+
+    #
+    bars = axs[0, 1].bar(metric, values, color=colors)
+
+
+    for bar, value in zip(bars, values):
+        axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
+                       ha='center', va='bottom', fontsize=12, color='white', weight='bold')
+
+    axs[0, 1].set_ylim(0, 1)  # Limit the y-axis to the range 0 to 1
+    axs[0, 1].set_xlabel('Metrics')
+    axs[0, 1].set_ylabel('Score')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
+    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
+
+    # Accuracy
+    plt.plot(acc_list, marker='o', linestyle='-',
+             color='r', label='Accuracy')
+
+
+    plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')

-    def GetPreds():
-
-        return 1

-
+    plt.xlabel('Samples')
+    plt.ylabel('Accuracy')
+    plt.title('Accuracy History')
+    plt.legend()
+
+
+    plt.tight_layout()
+    plt.show()
+
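plot_evaluate ties these helpers together: it decodes the one-hot test labels, computes weighted precision/recall/F1 via metrics, draws the confusion-matrix heatmap and the ROC curve, and plots the accuracy history; the per-class accuracy panel is currently disabled behind a docstring. Usage sketch:

    import numpy as np
    from plan import plan

    y_test = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
    preds = np.array([0, 1, 0, 1])
    acc_list = [0.5, 0.75, 1.0]   # e.g. a running accuracy history from evaluation

    plan.plot_evaluate(y_test, preds, acc_list)   # opens the matplotlib figure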
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+    x_train -- Input dataset (examples) - NumPy array format
+    y_train -- Class labels (one-hot encoded) - NumPy array format
+    target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+    x_balanced -- Balanced input dataset (NumPy array format)
+    y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)
+
+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
+
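manuel_balancer combines both strategies around a fixed target: classes above target_samples_per_class are randomly undersampled, classes below it are topped up with the same two-sample interpolation as synthetic_augmentation, with running time estimates printed along the way. Sketch:

    import numpy as np
    from plan import plan

    x = np.random.rand(10, 3)
    y = np.zeros((10, 2)); y[:7, 0] = 1; y[7:, 1] = 1   # 7 vs 3 samples

    x_bal, y_bal = plan.manuel_balancer(x, y, target_samples_per_class=5)
    print(x_bal.shape, y_bal.shape)   # (10, 3) (10, 2)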
+def get_weights():
+
+    return 0
+
+
+def get_df():
+
+    return 2
+
+
+def get_preds():
+
+    return 1
+
+
+def get_acc():
+
     return 2