pyerualjetwork 1.2.9__py3-none-any.whl → 2.6.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/__init__.py +1 -1
- plan/plan.py +1334 -815
- pyerualjetwork-2.6.6.dist-info/METADATA +8 -0
- pyerualjetwork-2.6.6.dist-info/RECORD +6 -0
- pyerualjetwork-1.2.9.dist-info/METADATA +0 -17
- pyerualjetwork-1.2.9.dist-info/RECORD +0 -6
- {pyerualjetwork-1.2.9.dist-info → pyerualjetwork-2.6.6.dist-info}/WHEEL +0 -0
- {pyerualjetwork-1.2.9.dist-info → pyerualjetwork-2.6.6.dist-info}/top_level.txt +0 -0
plan/plan.py
CHANGED
@@ -1,1095 +1,1614 @@
+
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jun 18 23:32:16 2024
+
+@author: hasan
+"""
+
+import pandas as pd
 import numpy as np
 import time
-from colorama import Fore,Style
+from colorama import Fore, Style
 from typing import List, Union
 import math
 from scipy.special import expit, softmax
 import matplotlib.pyplot as plt
 import seaborn as sns
+from tqdm import tqdm
 
 # BUILD -----
-
-
-
-
-
-
-
-
-
-
-
+
+
+def fit(
+    x_train: List[Union[int, float]],
+    y_train: List[Union[int, float]], # At least two.. and one hot encoded
+    show_training,
+    show_count= None,
+    val= None,
+    x_val= None,
+    y_val= None,
+    activation_potentiation=None # (float): Input activation_potentiation (optional)
 ) -> str:
-
+
     infoPLAN = """
     Creates and configures a PLAN model.
 
     Args:
-
-
-
-
-
-
-
-
-
-        Visualize (str): Visualize Training procces or not visualize ('y' or 'n')
-
+        x_train (list[num]): List of input data.
+        y_train (list[num]): List of target labels. (one hot encoded)
+        show_training (bool, str): True, None or 'final'
+        show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
+        val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None
+        val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
+        x_val (list[num]): List of validation data. (optional) Default: x_train
+        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
+        activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
     Returns:
-        list([num]): (Weight matrices list,
+        list([num]): (Weight matrices list, train_predictions list, Train_acc).
         error handled ?: Process status ('e')
     """
-
-
-
-
-
-
-    LastNeuron = Neurons[-1:][0]
-    if LastNeuron != ClassCount:
-        print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(Normalizations) != len(MembranPotentials):
-
-        print(Fore.RED + "ERROR307: Normalization list length must be equal to length of MembranThresholds List,MembranPotentials List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(TrainInputs) != len(TrainLabels):
-        print(Fore.RED + "ERROR301: TrainInputs list and TrainLabels list must be same length.",infoPLAN)
+
+
+    if len(x_train) != len(y_train):
+
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
         return 'e'
 
-
+    if x_val == None and y_val == None:
+
+        x_val = x_train
+        y_val = y_train
+
+    if val == True or val == 'final':
 
-
-        print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
-        return 'e'
-
-    if MembranThresholds[i] == 'none':
-        print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
-        time.sleep(3)
-
-    if isinstance(Value, str):
-        print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
-        return 'e'
+        val_bar = tqdm(total=1, desc="Training / Validating Accuracy", ncols=120)
 
-    if isinstance(Neurons[i], str):
-        print(Fore.RED + "ERROR202: Neurons list must be numeric.")
-        return 'e'
-
-    if len(MembranThresholds) != len(MembranPotentials):
-        print(Fore.RED + "ERROR302: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(Layers) != len(Neurons):
-        print(Fore.RED + "ERROR303: Layers list and Neurons list must same length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if len(MembranPotentials) != len(Layers) or len(MembranThresholds) != len(Layers):
-        print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same Layers list length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-
-    for Activation in Activations:
-        if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
-            print(Fore.RED + "ERROR108: Activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
-            return 'e'
-
 
-
-    if Neuron < 1:
-        print(Fore.RED + "ERROR101: Neurons list must be positive non zero integer. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    if index + 1 != len(Neurons) and Neuron % 2 != 0:
-        print(Fore.MAGENTA + "WARNING101: We strongly advise to do Neuron counts be should even numbers. from: TrainPLAN",infoPLAN)
-        time.sleep(3)
-
-    if Neuron < ClassCount:
-        print(Fore.RED + "ERROR102: Neuron count must be greater than class count(For PLAN). from: TrainPLAN")
-        return 'e'
+    if show_count == None:
 
-
-
-
-
-
-        print(Fore.RED + "ERROR305: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
-        return 'e'
-
+        show_count = 10
+
+    if show_training == True or show_training == 'final':
+
+        row, col = shape_control(x_train)
 
-
-
-
-
-
-
-
+    class_count = set()
+
+    for sublist in y_train:
+
+        class_count.add(tuple(sublist))
+
+    class_count = list(class_count)
+
+    y_train = [tuple(sublist) for sublist in y_train]
+
+    neurons = [len(class_count), len(class_count)]
+    layers = ['fex']
+    val_list = []
+
+    x_train[0] = np.array(x_train[0])
+    x_train[0] = x_train[0].ravel()
+    x_train_size = len(x_train[0])
+
+    STPW = weight_identification(
+        len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
+
+    LTPW = [1] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT
+
+    y = decode_one_hot(y_train)
+
+    for index, inp in enumerate(x_train):
 
-
-
-
-        UniqueTrainLabels.add(tuple(sublist))
-
-
-    UniqueTrainLabels = list(UniqueTrainLabels)
-
-    TrainLabels = [tuple(sublist) for sublist in TrainLabels]
-
-
-    if len(UniqueTrainLabels) != ClassCount:
-        print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
-        return 'e'
-
-    TrainInputs[0] = np.array(TrainInputs[0])
-    TrainInputs[0] = TrainInputs[0].ravel()
-    TrainInputsize = len(TrainInputs[0])
-
-    W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,TrainInputsize)
-    Divides = SynapticDividing(ClassCount,W)
-    TrainedWs = [1] * len(W)
-    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
-    TrainPredictions = [None] * len(TrainLabels)
-    true = 0
-    StartTime = time.time()
-    for index, inp in enumerate(TrainInputs):
-        UniStartTime = time.time()
+        progress = index / len(x_train) * 100
+
         inp = np.array(inp)
         inp = inp.ravel()
 
-        if
-            print(Fore.RED +"ERROR304: All input matrices or vectors in
+        if x_train_size != len(inp):
+            print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
+                  infoPLAN + Style.RESET_ALL)
             return 'e'
-
-
-
+
+        neural_layer = inp
+
+        for Lindex, Layer in enumerate(layers):
+
+            neural_layer = normalization(neural_layer)
+
+            if Layer == 'fex':
+                STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)
+
+        for i, w in enumerate(STPW):
+            LTPW[i] = LTPW[i] + w
+
 
-
-
-
-
-
-
-
-
-
-
-        for Lindex, Layer in enumerate(Layers):
+        if val == True:
+
+            if show_training == True:
+                try:
+                    plt.close(fig)
+
+                except:
+                    pass
+
+            validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
 
-
-
+            val_acc = validation_model[get_acc()]
+
+            val_list.append(val_acc)
+
+            if index == 0:
 
-
-
-
-            NeuralLayer = Sigmoid(NeuralLayer)
-        elif Activations[Lindex] == 'softmax':
-            NeuralLayer = Softmax(NeuralLayer)
+                val_bar.update(val_acc)
+
+            if index != 0:
 
-
-
-
-
+                val_acc = val_acc - val_list[index - 1]
+
+                val_bar.update(val_acc)
+
+
+        if show_training == True:
+
+            if index %show_count == 0:
+
+
+                if index != 0:
+                    plt.close(fig)
+
+                if row != 0:
+
+                    fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+
+                else:
+
+                    fig, ax = plt.subplots(1, 1, figsize=(18, 14))
+
+            for j in range(len(class_count)):
+
+
+                if row != 0:
+
+                    mat = LTPW[0][j,:].reshape(row, col)
+                    suptitle_info = 'Neurons Learning Progress: % '
+                    title_info = f'{j+1}. Neuron'
+
+                    mat = LTPW[0][j,:].reshape(row, col)
+
+                    ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+                    ax[j].set_aspect('equal')
+
+                    ax[j].set_xticks([])
+                    ax[j].set_yticks([])
+                    ax[j].set_title(title_info)
+
+                else:
+
+                    mat = LTPW[0]
+                    ax.imshow(mat, interpolation='sinc', cmap='viridis')
+                    suptitle_info = 'Weight Learning Progress: % '
+                    title_info = 'Weight Matrix Of Fex Layer'
+
 
-            RealOutput = np.argmax(TrainLabels[index])
-            PredictedOutput = np.argmax(NeuralLayer)
-            if RealOutput == PredictedOutput:
-                true += 1
-            Acc = true / len(TrainLabels)
-            TrainPredictions[index] = PredictedOutput
-
-        if Visualize == 'y':
-
-            TrainLabelsVisual = np.copy(TrainLabels)
-            TrainLabelsVisual = np.argmax(TrainLabelsVisual, axis=1)
-
-            plt.figure(figsize=(12, 6))
-            sns.kdeplot(TrainLabelsVisual, label='Real Outputs', shade=True)
-            sns.kdeplot(TrainPredictions, label='Predictions', shade=True)
-            plt.legend()
-            plt.xlabel('Class')
-            plt.ylabel('Data size')
-            plt.title('Predictions and Real Outputs for Training KDE Plot')
-            plt.show()
 
-
+
+            progress_status = f"{progress:.1f}"
+            fig.suptitle(suptitle_info + progress_status)
+            plt.draw()
+            plt.pause(0.1)
 
-
-
-
-
-
-
-        else:
-            for i, w in enumerate(W):
-                TrainedWs[i] = TrainedWs[i] + w
+
+        W = weight_identification(
+            len(layers) - 1, len(class_count), neurons, x_train_size)
+
+
+    if show_training == 'final':
 
+        fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+
+        for j in range(len(class_count)):
+
+            mat = LTPW[0][j,:].reshape(row, col)
+
+            ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+            ax[j].set_aspect('equal')
 
-
-
-
-
-
-
-
-
-
-
-
-
-            print('\rTrain Accuracy: ' ,Acc ,"\n", end="")
-
-        elif CalculatingEst > 3600:
-            print('\rest......(h):',CalculatingEst/3600,'\n',end= "")
-            print('\rTrain Accuracy: ' ,Acc ,"\n", end="")
+            ax[j].set_xticks([])
+            ax[j].set_yticks([])
+            ax[j].set_title(f'{j+1}. Neuron')
+
+        progress_status = f"{progress:.1f}"
+        fig.suptitle('Neurons Learning Progress: % ' + progress_status)
+        plt.draw()
+        plt.pause(0.1)
+
+
+    if val == 'final':
+
+        validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
 
-
+        val_acc = validation_model[get_acc()]
 
-
-
-    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")
-
-    if CalculatingEst < 60:
-        print('Total training time(sec): ',CalculatingEst)
-
-    elif CalculatingEst > 60 and CalculatingEst < 3600:
-        print('Total training time(min): ',CalculatingEst/60)
-
-    elif CalculatingEst > 3600:
-        print('Total training time(h): ',CalculatingEst/3600)
+        val_list.append(val_acc)
 
-
-
-
-
-        print(Fore.MAGENTA + '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
-
-    elif Acc < 0.6:
-        print(Fore.RED+ '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
-
-
-
+        val_bar.update(val_acc)
+
+
+    return LTPW
 
-    return TrainedWs,TrainPredictions,Acc
-
 # FUNCTIONS -----
 
-def
-    LayerCount, # int: Number of layers in the neural network.
-    ClassCount, # int: Number of classes in the classification task.
-    Neurons, # list[num]: List of neuron counts for each layer.
-    TrainInputsize # int: Size of the input data.
-) -> str:
-    """
-    Identifies the weights for a neural network model.
+def shape_control(x_train):
 
-
-
-
-
-
+    try:
+        row = x_train[1].shape[0]
+        col = x_train[1].shape[1]
+
+    except:
 
-
-
-
+        print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
+
+        try:
+            row, col = find_numbers(len(x_train[0]))
+
+        except:
+
+            print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
+            return [0, 0]
+
+    return row, col
 
-
-
-
-    W[0] = np.ones((Neurons[0],TrainInputsize))
-    ws = LayerCount - 1
-    for w in range(ws):
-        W[w + 1] = np.ones((Neurons[w + 1],Neurons[w]))
-    W[LayerCount] = np.ones((ClassCount,Neurons[LayerCount - 1]))
-    return W
+def find_numbers(n):
+    if n <= 1:
+        raise ValueError("Parameter 'n' must be greater than 1.")
 
-
-
-
-
-
-
-
+    for i in range(2, int(n**0.5) + 1):
+        if n % i == 0:
+            factor1 = i
+            factor2 = n // i
+            if factor1 == factor2:
+                return factor1, factor2
+
+    return None
+
+def weight_normalization(
+    W,
+    class_count
 ) -> str:
-
-
+    """
+    Row(Neuron) based normalization. For unbalanced models.
 
     Args:
-
-
-        Key (str): Key for identifying synaptic row or col connections.
-        Class (int): Class label for the current training instance.
-        ClassCount (int): Total number of classes in the dataset.
+        W (list(num)): Trained weight matrix list.
+        class_count (int): Class count of model.
 
     Returns:
-
+        list([numpy_arrays],[...]): posttrained weight matices of the model. .
     """
 
-
-    Class += 1 # because index start 0
-
-    if Class != ClassCount and Class != 1:
-
-        Ce = Cs / Class
-
-        w[int(Ce)-1::-1,:] = 0
-
-        w[Cs:,:] = 0
-
-
-    else:
+    for i in range(class_count):
 
-
-        if Key == 'row':
+        W[0][i,:] = normalization(W[0][i,:])
 
-
-
-        elif Key == 'col':
-
-            w[:,Cs] = 0
-
-        else:
-            print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning)
-            return 'e'
-    else:
-        if Key == 'row':
-
-            w[Cs:,:] = 0
-
-            Ce = int(round(w.shape[0] - Cs / ClassCount))
-            w[Ce-1::-1,:] = 0
-
-        elif Key == 'col':
-
-            w[:,Cs] = 0
-
-        else:
-            print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning + Style.RESET_ALL)
-            return 'e'
-    return w
+    return W
 
-def
-
-
+def weight_identification(
+    layer_count, # int: Number of layers in the neural network.
+    class_count, # int: Number of classes in the classification task.
+    neurons, # list[num]: List of neuron counts for each layer.
+    x_train_size # int: Size of the input data.
 ) -> str:
     """
-
+    Identifies the weights for a neural network model.
 
     Args:
-
-
+        layer_count (int): Number of layers in the neural network.
+        class_count (int): Number of classes in the classification task.
+        neurons (list[num]): List of neuron counts for each layer.
+        x_train_size (int): Size of the input data.
 
     Returns:
-        list:
+        list([numpy_arrays],[...]): pretrained weight matices of the model. .
     """
 
-
-
-
-
-
-
-
-
-
-
-        Piece[i] = int(math.floor(W[i].shape[0] / ClassCount))
-
-    Cs = 0
-    # j = Classes, i = Weights, [0] = CutStart.
+    Wlen = layer_count + 1
+    W = [None] * Wlen
+    W[0] = np.ones((neurons[0], x_train_size))
+    ws = layer_count - 1
+    for w in range(ws):
+        W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
+
+    return W
 
-    for i in range(len(W)):
-        for j in range(ClassCount):
-            Cs = Cs + Piece[i]
-            Divides[j][i][0] = Cs
-            #print('Divides: ' + j + i + ' = ' + Divides[j][i][0])
-            #input()
-
-    j = 0
-    Cs = 0
-
-    return Divides
-
 
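The rewritten `weight_identification` drops the old per-layer pruning setup and just allocates one ones-matrix per layer. A self-contained sketch of what `fit` gets back for a two-class model (the function body is copied from the hunk above; the shapes printed are illustrative):

    import numpy as np

    def weight_identification(layer_count, class_count, neurons, x_train_size):
        # Mirrors the 2.6.6 code above: one ones-matrix per layer.
        W = [None] * (layer_count + 1)
        W[0] = np.ones((neurons[0], x_train_size))
        for w in range(layer_count - 1):
            W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
        return W

    W = weight_identification(0, 2, [2, 2], 16)  # the call fit() makes for 2 classes
    print(len(W), W[0].shape)                    # -> 1 (2, 16): one 'fex' layer, one row per class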
-
def
|
326
|
+
def fex(
|
419
327
|
Input, # list[num]: Input data.
|
420
|
-
w, #
|
421
|
-
|
422
|
-
|
328
|
+
w, # num: Weight matrix of the neural network.
|
329
|
+
is_training, # bool: Flag indicating if the function is called during training (True or False).
|
330
|
+
Class, # int: Which class is, if training.
|
331
|
+
activation_potentiation # float or None: Input activation potentiation (optional)
|
423
332
|
) -> tuple:
|
424
333
|
"""
|
425
|
-
Applies feature extraction process to the input data using synaptic
|
334
|
+
Applies feature extraction process to the input data using synaptic potentiation.
|
426
335
|
|
427
336
|
Args:
|
428
|
-
Input (
|
429
|
-
w (
|
430
|
-
|
431
|
-
|
337
|
+
Input (num): Input data.
|
338
|
+
w (num): Weight matrix of the neural network.
|
339
|
+
is_training (bool): Flag indicating if the function is called during training (True or False).
|
340
|
+
Class (int): if is during training then which class(label) ? is isnt then put None.
|
341
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
432
342
|
|
433
343
|
Returns:
|
434
344
|
tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
|
435
345
|
"""
|
436
346
|
|
437
|
-
if
|
438
|
-
|
439
|
-
|
440
|
-
PruneIndex = np.where(Input > MembranPotential)
|
441
|
-
elif MembranThreshold == '==':
|
442
|
-
PruneIndex = np.where(Input == MembranPotential)
|
443
|
-
elif MembranThreshold == '!=':
|
444
|
-
PruneIndex = np.where(Input != MembranPotential)
|
445
|
-
|
446
|
-
w = SynapticPruning(w, PruneIndex, 'col', 0, 0)
|
447
|
-
|
448
|
-
NeuralLayer = np.dot(w, Input)
|
449
|
-
return NeuralLayer,w
|
450
|
-
|
451
|
-
def Cat(
|
452
|
-
Input, # list[num]: Input data.
|
453
|
-
w, # list[list[num]]: Weight matrix of the neural network.
|
454
|
-
MembranThreshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
|
455
|
-
MembranPotential, # num: Threshold value for comparison.
|
456
|
-
isTrain # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
|
457
|
-
) -> tuple:
|
458
|
-
"""
|
459
|
-
Applies categorization process to the input data using synaptic pruning if specified.
|
460
|
-
|
461
|
-
Args:
|
462
|
-
Input (list[num]): Input data.
|
463
|
-
w (list[list[num]]): Weight matrix of the neural network.
|
464
|
-
MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
|
465
|
-
MembranPotential (num): Threshold value for comparison.
|
466
|
-
isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
|
347
|
+
if is_training == True and activation_potentiation == None:
|
348
|
+
|
349
|
+
w[Class, :] = Input
|
467
350
|
|
468
|
-
|
469
|
-
tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
|
470
|
-
"""
|
351
|
+
return w
|
471
352
|
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
353
|
+
elif is_training == True and activation_potentiation != None:
|
354
|
+
|
355
|
+
|
356
|
+
Input[Input < activation_potentiation] = 0
|
357
|
+
Input[Input > activation_potentiation] = 1
|
358
|
+
|
359
|
+
w[Class,:] = Input
|
360
|
+
|
361
|
+
return w
|
362
|
+
|
363
|
+
elif is_training == False and activation_potentiation == None:
|
364
|
+
|
365
|
+
neural_layer = np.dot(w, Input)
|
483
366
|
|
484
|
-
|
485
|
-
|
486
|
-
|
367
|
+
return neural_layer
|
368
|
+
|
369
|
+
elif is_training == False and activation_potentiation != None:
|
370
|
+
|
371
|
+
Input[Input < activation_potentiation] = 0
|
372
|
+
Input[Input > activation_potentiation] = 1
|
373
|
+
|
374
|
+
neural_layer = np.dot(w, Input)
|
375
|
+
|
376
|
+
return neural_layer
|
487
377
|
|
488
378
|
|
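`fex` replaces the old pruning-based `Fex`/`Cat` pair: during training it writes the (optionally binarized) input into the weight row of the sample's class; at inference it reduces to a dot product. A self-contained sketch of those branches (toy values, not from the package):

    import numpy as np

    w = np.ones((2, 4))
    x = np.array([0.9, 0.1, 0.7, 0.2])

    # Training branch, activation_potentiation=None: the input becomes class 0's row.
    w[0, :] = x

    # Training branch with activation_potentiation=0.5: the input is binarized first.
    xb = x.copy()
    xb[xb < 0.5] = 0
    xb[xb > 0.5] = 1
    w[1, :] = xb

    # Inference branch: a plain dot product against the potentiated weights.
    scores = np.dot(w, x)
    print(scores.argmax())  # index of the predicted class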
-
-
+
+def normalization(
+    Input # num: Input data to be normalized.
 ):
     """
     Normalizes the input data using maximum absolute scaling.
 
     Args:
-        Input (
+        Input (num): Input data to be normalized.
 
     Returns:
-
+        (num) Scaled input data after normalization.
     """
 
-
     AbsVector = np.abs(Input)
-
+
     MaxAbs = np.max(AbsVector)
-
+
     ScaledInput = Input / MaxAbs
-
+
     return ScaledInput
 
 
 def Softmax(
-    x #
+    x # num: Input data to be transformed using softmax function.
 ):
     """
     Applies the softmax function to the input data.
 
     Args:
-
+        (num): Input data to be transformed using softmax function.
 
     Returns:
-
+        (num): Transformed data after applying softmax function.
     """
-
+
     return softmax(x)
 
 
 def Sigmoid(
-    x #
+    x # num: Input data to be transformed using sigmoid function.
 ):
     """
     Applies the sigmoid function to the input data.
 
     Args:
-
+        (num): Input data to be transformed using sigmoid function.
 
     Returns:
-
+        (num): Transformed data after applying sigmoid function.
     """
     return expit(x)
 
 
 def Relu(
-    x #
+    x # num: Input data to be transformed using ReLU function.
 ):
     """
     Applies the Rectified Linear Unit (ReLU) function to the input data.
 
     Args:
-
+        (num): Input data to be transformed using ReLU function.
 
     Returns:
-
+        (num): Transformed data after applying ReLU function.
     """
 
-
     return np.maximum(0, x)
 
 
|
-
def
|
561
|
-
|
562
|
-
|
563
|
-
|
564
|
-
|
565
|
-
|
566
|
-
|
567
|
-
Activations, # str: Activation function list for the neural network.
|
568
|
-
Visualize, # Visualize Testing procces or not visualize ('y' or 'n')
|
569
|
-
W # list[list[num]]: Weight matrix of the neural network.
|
449
|
+
def evaluate(
|
450
|
+
x_test, # list[num]: Test input data.
|
451
|
+
y_test, # list[num]: Test labels.
|
452
|
+
show_metrices, # show_metrices (bool): (True or False)
|
453
|
+
W, # list[num]: Weight matrix list of the neural network.
|
454
|
+
activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional)
|
455
|
+
acc_bar_status = True
|
570
456
|
) -> tuple:
|
571
|
-
infoTestModel =
|
457
|
+
infoTestModel = """
|
572
458
|
Tests the neural network model with the given test data.
|
573
459
|
|
574
460
|
Args:
|
575
|
-
|
576
|
-
|
577
|
-
|
578
|
-
|
579
|
-
|
580
|
-
|
581
|
-
Activation (str): Activation function for the neural network.
|
582
|
-
W (list[list[num]]): Weight matrix of the neural network.
|
461
|
+
x_test (list[num]): Test input data.
|
462
|
+
y_test (list[num]): Test labels.
|
463
|
+
show_metrices (bool): (True or False)
|
464
|
+
W (list[num]): Weight matrix list of the neural network.
|
465
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
466
|
+
acc_bar_status (bool): Loading bar for accuracy (True or None)
|
583
467
|
|
584
468
|
Returns:
|
585
469
|
tuple: A tuple containing the predicted labels and the accuracy of the model.
|
586
470
|
"""
|
587
471
|
|
472
|
+
layers = ['fex']
|
588
473
|
|
589
474
|
try:
|
590
|
-
Wc = [0] * len(W)
|
475
|
+
Wc = [0] * len(W) # Wc = Weight copy
|
591
476
|
true = 0
|
592
|
-
|
477
|
+
y_preds = [-1] * len(y_test)
|
478
|
+
acc_list = []
|
479
|
+
|
593
480
|
for i, w in enumerate(W):
|
594
481
|
Wc[i] = np.copy(w)
|
595
|
-
print('\rCopying weights.....',i+1,'/',len(W),end
|
596
|
-
|
597
|
-
|
598
|
-
|
599
|
-
|
482
|
+
print('\rCopying weights.....', i+1, '/', len(W), end="")
|
483
|
+
|
484
|
+
# print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
|
485
|
+
if acc_bar_status == True:
|
486
|
+
acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=75)
|
487
|
+
for inpIndex, Input in enumerate(x_test):
|
600
488
|
Input = np.array(Input)
|
601
489
|
Input = Input.ravel()
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
for index, Layer in enumerate(
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
613
|
-
NeuralLayer = Softmax(NeuralLayer)
|
614
|
-
|
615
|
-
if Layers[index] == 'fex':
|
616
|
-
NeuralLayer,useless = Fex(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index])
|
617
|
-
if Layers[index] == 'cat':
|
618
|
-
NeuralLayer,useless = Cat(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index],0)
|
490
|
+
uni_start_time = time.time()
|
491
|
+
neural_layer = Input
|
492
|
+
|
493
|
+
for index, Layer in enumerate(layers):
|
494
|
+
|
495
|
+
neural_layer = normalization(neural_layer)
|
496
|
+
|
497
|
+
if Layer == 'fex':
|
498
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
|
499
|
+
|
500
|
+
|
619
501
|
for i, w in enumerate(Wc):
|
620
502
|
W[i] = np.copy(w)
|
621
|
-
RealOutput = np.argmax(
|
622
|
-
PredictedOutput = np.argmax(
|
503
|
+
RealOutput = np.argmax(y_test[inpIndex])
|
504
|
+
PredictedOutput = np.argmax(neural_layer)
|
623
505
|
if RealOutput == PredictedOutput:
|
624
506
|
true += 1
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
629
|
-
|
630
|
-
TestLabelsVisual = np.copy(TestLabels)
|
631
|
-
TestLabelsVisual = np.argmax(TestLabelsVisual, axis=1)
|
632
|
-
|
633
|
-
plt.figure(figsize=(12, 6))
|
634
|
-
sns.kdeplot(TestLabelsVisual, label='Real Outputs', shade=True)
|
635
|
-
sns.kdeplot(TestPredictions, label='Predictions', shade=True)
|
636
|
-
plt.legend()
|
637
|
-
plt.xlabel('Class')
|
638
|
-
plt.ylabel('Data size')
|
639
|
-
plt.title('Predictions and Real Outputs for Testing KDE Plot')
|
640
|
-
plt.show()
|
641
|
-
|
642
|
-
if inpIndex + 1 != len(TestInputs):
|
643
|
-
|
644
|
-
plt.close('all')
|
645
|
-
|
646
|
-
UniEndTime = time.time()
|
647
|
-
|
648
|
-
CalculatingEst = round((UniEndTime - UniStartTime) * (len(TestInputs) - inpIndex),3)
|
649
|
-
|
650
|
-
if CalculatingEst < 60:
|
651
|
-
print('\rest......(sec):',CalculatingEst,'\n',end= "")
|
652
|
-
print('\rTest Accuracy: ' ,Acc ,"\n", end="")
|
507
|
+
acc = true / len(y_test)
|
508
|
+
|
509
|
+
|
510
|
+
acc_list.append(acc)
|
511
|
+
y_preds[inpIndex] = PredictedOutput
|
653
512
|
|
654
|
-
|
655
|
-
|
656
|
-
|
513
|
+
if acc_bar_status == True:
|
514
|
+
if inpIndex == 0:
|
515
|
+
acc_bar.update(acc)
|
516
|
+
|
517
|
+
else:
|
518
|
+
acc = acc - acc_list[inpIndex - 1]
|
519
|
+
acc_bar.update(acc)
|
657
520
|
|
658
|
-
|
659
|
-
|
660
|
-
|
661
|
-
|
662
|
-
EndTime = time.time()
|
521
|
+
if show_metrices == True:
|
522
|
+
plot_evaluate(y_test, y_preds, acc_list)
|
523
|
+
|
524
|
+
|
663
525
|
for i, w in enumerate(Wc):
|
664
526
|
W[i] = np.copy(w)
|
665
527
|
|
666
|
-
CalculatingEst = round(EndTime - StartTime,2)
|
667
|
-
|
668
|
-
print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
|
669
|
-
|
670
|
-
if CalculatingEst < 60:
|
671
|
-
print('Total testing time(sec): ',CalculatingEst)
|
672
|
-
|
673
|
-
elif CalculatingEst > 60 and CalculatingEst < 3600:
|
674
|
-
print('Total testing time(min): ',CalculatingEst/60)
|
675
|
-
|
676
|
-
elif CalculatingEst > 3600:
|
677
|
-
print('Total testing time(h): ',CalculatingEst/3600)
|
678
|
-
|
679
|
-
if Acc >= 0.8:
|
680
|
-
print(Fore.GREEN + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
681
|
-
|
682
|
-
elif Acc < 0.8 and Acc > 0.6:
|
683
|
-
print(Fore.MAGENTA + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
684
|
-
|
685
|
-
elif Acc <= 0.6:
|
686
|
-
print(Fore.RED+ '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
|
687
|
-
|
688
528
|
except:
|
689
|
-
|
690
|
-
print(Fore.RED + "ERROR:
|
529
|
+
|
530
|
+
print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
|
531
|
+
infoTestModel + Style.RESET_ALL)
|
691
532
|
return 'e'
|
692
|
-
|
693
|
-
return W,
|
694
|
-
|
695
|
-
|
696
|
-
|
697
|
-
|
698
|
-
|
699
|
-
|
700
|
-
|
701
|
-
|
702
|
-
|
703
|
-
|
704
|
-
|
705
|
-
|
706
|
-
|
707
|
-
|
708
|
-
|
709
|
-
):
|
710
|
-
|
711
|
-
|
712
|
-
|
533
|
+
|
534
|
+
return W, y_preds, acc
|
535
|
+
|
536
|
+
|
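A minimal evaluation call against the new signature, reusing `LTPW` from the `fit` sketch earlier and assumed held-out `x_test`/`y_test` (import path assumed, as before):

    # Hypothetical usage sketch; evaluate() returns the (W, y_preds, acc) triple shown above.
    W, y_preds, acc = plan.evaluate(x_test, y_test, show_metrices=False, W=LTPW)
    print('test accuracy:', acc)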
+def multiple_evaluate(
+    x_test, # list[num]: Test input data.
+    y_test, # list[num]: Test labels.
+    show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
+    MW, # list[list[num]]: Weight matrix of the neural network.
+    activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
+) -> tuple:
+    infoTestModel = """
+    Tests the neural network model with the given test data.
+
+    Args:
+        x_test (list[num]): Test input data.
+        y_test (list[num]): Test labels.
+        show_metrices (bool): (True or False)
+        MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
+
+    Returns:
+        tuple: A tuple containing the predicted labels and the accuracy of the model.
+    """
+
+    layers = ['fex', 'cat']
+
+    try:
+        y_preds = [-1] * len(y_test)
+        acc_list = []
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        true = 0
+        for inpIndex, Input in enumerate(x_test):
+
+            output_layer = 0
+
+            for m, Model in enumerate(MW):
+
+                W = Model
+
+                Wc = [0] * len(W) # Wc = weight copy
+
+                y_preds = [None] * len(y_test)
+                for i, w in enumerate(W):
+                    Wc[i] = np.copy(w)
+
+                Input = np.array(Input)
+                Input = Input.ravel()
+                uni_start_time = time.time()
+                neural_layer = Input
+
+                for index, Layer in enumerate(layers):
+
+                    neural_layer = normalization(neural_layer)
+
+                    if Layer == 'fex':
+                        neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
+
+                output_layer += neural_layer
+
+                for i, w in enumerate(Wc):
+                    W[i] = np.copy(w)
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(output_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            if show_metrices == True:
+                acc_list.append(acc)
+            y_preds[inpIndex] = PredictedOutput
+
+
+            uni_end_time = time.time()
+
+            calculating_est = round(
+                (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):', calculating_est, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):', calculating_est/60, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):', calculating_est/3600, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+        if show_metrices == True:
+            plot_evaluate(y_test, y_preds, acc_list)
+
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+
+        calculating_est = round(EndTime - start_time, 2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ', calculating_est)
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ', calculating_est/60)
+
+        elif calculating_est > 3600:
+            print('Total testing time(h): ', calculating_est/3600)
+
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
+
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
+
+    except:
+
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        return 'e'
+
+    return W, y_preds, acc
+
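`multiple_evaluate` ensembles several trained models by summing their raw output layers before the argmax. Conceptually (toy numbers, not from the package):

    import numpy as np

    # Two models' raw outputs for one sample; the ensemble sums them, then argmax.
    out_a = np.array([0.2, 0.8])
    out_b = np.array([0.6, 0.4])
    print((out_a + out_b).argmax())  # -> 1: class 1 wins the summed vote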
+def save_model(model_name,
+               model_type,
+               class_count,
+               test_acc,
+               weights_type,
+               weights_format,
+               model_path,
+               scaler_params,
+               W,
+               activation_potentiation=None
+               ):
+
+    infosave_model = """
+    Function to save a potentiation learning model.
 
     Arguments:
-
-
-
-
-
-        MembranPotentials (list): List containing MEMBRAN POTENTIALS.
-        DoNormalization (str): is that normalized data ? 'y' or 'n'.
-        Activations (list): List containing activation functions for each layer.
-        TestAcc (float): Test accuracy of the model.
-        LogType (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
-        WeightsType (str): Type of weights to save (options: 'txt', 'npy', 'mat').
+    model_name (str): Name of the model.
+    model_type (str): Type of the model.(options: PLAN)
+    class_count (int): Number of classes.
+    test_acc (float): Test accuracy of the model.
+    weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
     WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
-
+    model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+    scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W: Weights of the model.
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
         str: Message indicating if the model was saved successfully or encountered an error.
     """
-
+
     # Operations to be performed by the function will be written here
     pass
 
-
-
-
-
-
-        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
+    layers = ['fex']
+
+    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
+        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
-
-    if
-        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from:
+
+    if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
+        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
-
+
     NeuronCount = 0
     SynapseCount = 0
+
     try:
         for w in W:
             NeuronCount += np.shape(w)[0]
             SynapseCount += np.shape(w)[0] * np.shape(w)[1]
     except:
-
-        print(Fore.RED + "ERROR: Weight matrices has a problem from:
+
+        print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
     import pandas as pd
     from datetime import datetime
     from scipy import io
-
-    data = {'MODEL NAME':
-            'MODEL TYPE':
-            'LAYERS':
-            'LAYER COUNT': len(
-            'CLASS COUNT':
-            'MEMBRAN THRESHOLDS': MembranThresholds,
-            'MEMBRAN POTENTIALS': MembranPotentials,
-            'NORMALIZATION': Normalizations,
-            'ACTIVATIONS': Activations,
+
+    data = {'MODEL NAME': model_name,
+            'MODEL TYPE': model_type,
+            'LAYERS': layers,
+            'LAYER COUNT': len(layers),
+            'CLASS COUNT': class_count,
             'NEURON COUNT': NeuronCount,
             'SYNAPSE COUNT': SynapseCount,
-            'TEST ACCURACY':
+            'TEST ACCURACY': test_acc,
             'SAVE DATE': datetime.now(),
-            'WEIGHTS TYPE':
-            'WEIGHTS FORMAT':
-            'MODEL PATH':
+            'WEIGHTS TYPE': weights_type,
+            'WEIGHTS FORMAT': weights_format,
+            'MODEL PATH': model_path,
+            'STANDARD SCALER': scaler_params,
+            'ACTIVATION POTENTIATION': activation_potentiation
             }
     try:
-
+
         df = pd.DataFrame(data)
-
-
-
-            df.to_csv(ModelPath + ModelName + '.csv', sep='\t', index=False)
-
-        elif LogType == 'txt':
-
-            df.to_csv(ModelPath + ModelName + '.txt', sep='\t', index=False)
-
-        elif LogType == 'hdf5':
-
-            df.to_hdf(ModelPath + ModelName + '.h5', key='data', mode='w')
-
+
+        df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
+
     except:
-
-        print(Fore.RED + "ERROR: Model log not saved probably
+
+        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
     try:
-
-        if
-
+
+        if weights_type == 'txt' and weights_format == 'd':
+
             for i, w in enumerate(W):
-                np.savetxt(
-
-
-
+                np.savetxt(model_path + model_name +
+                           str(i+1) + 'w.txt', w, fmt='%d')
+
+        if weights_type == 'txt' and weights_format == 'f':
+
             for i, w in enumerate(W):
-
-
-
-
+                np.savetxt(model_path + model_name +
+                           str(i+1) + 'w.txt', w, fmt='%f')
+
+        if weights_type == 'txt' and weights_format == 'raw':
+
             for i, w in enumerate(W):
-                np.savetxt(
-
-
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
+
         ###
-
-
-
-
+
+        if weights_type == 'npy' and weights_format == 'd':
+
             for i, w in enumerate(W):
-                np.save(
-
-
-
+                np.save(model_path + model_name +
+                        str(i+1) + 'w.npy', w.astype(int))
+
+        if weights_type == 'npy' and weights_format == 'f':
+
             for i, w in enumerate(W):
-
-
-
-
+                np.save(model_path + model_name + str(i+1) +
+                        'w.npy', w, w.astype(float))
+
+        if weights_type == 'npy' and weights_format == 'raw':
+
             for i, w in enumerate(W):
-                np.save(
-
-
+                np.save(model_path + model_name + str(i+1) + 'w.npy', w)
+
         ###
-
-
-
-
+
+        if weights_type == 'mat' and weights_format == 'd':
+
             for i, w in enumerate(W):
                 w = {'w': w.astype(int)}
-                io.savemat(
-
-        if
-
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+
+        if weights_type == 'mat' and weights_format == 'f':
+
             for i, w in enumerate(W):
                 w = {'w': w.astype(float)}
-                io.savemat(
-
-        if
-
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+
+        if weights_type == 'mat' and weights_format == 'raw':
+
             for i, w in enumerate(W):
                 w = {'w': w}
-                io.savemat(
-
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+
     except:
-
-        print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from:
+
+        print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
     print(df)
     message = (
         Fore.GREEN + "Model Saved Successfully\n" +
-        Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
+        Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
         Style.RESET_ALL
-
-
+    )
+
     return print(message)
 
 
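A minimal save call against the 2.6.6 signature shown above. The model name and path are illustrative placeholders; `LTPW` and `acc` come from the earlier sketches:

    # Hypothetical usage sketch: persist a trained model to disk.
    plan.save_model(model_name='my_plan', model_type='PLAN', class_count=2,
                    test_acc=acc, weights_type='npy', weights_format='raw',
                    model_path='C:/Users/hasan/Desktop/denemePLAN/',
                    scaler_params=None, W=LTPW)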
-def
-
-
-
-
-    Function to load a deep learning model.
+def load_model(model_name,
+               model_path,
+               ):
+    infoload_model = """
+    Function to load a potentiation learning model.
 
     Arguments:
-
-
-    LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
+    model_name (str): Name of the model.
+    model_path (str): Path where the model is saved.
 
     Returns:
-    lists: W(list[num]),
+    lists: W(list[num]), activation_potentiation, DataFrame of the model
     """
-
+    pass
+
+    import scipy.io as sio
+
+    try:
+
+        df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
+
+    except:
+
+        print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
+              infoload_model + Style.RESET_ALL)
+
+    model_name = str(df['MODEL NAME'].iloc[0])
+    layer_count = int(df['LAYER COUNT'].iloc[0])
+    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+
+    W = [0] * layer_count
+
+    if WeightType == 'txt':
+        for i in range(layer_count):
+            W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
+    elif WeightType == 'npy':
+        for i in range(layer_count):
+            W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
+    elif WeightType == 'mat':
+        for i in range(layer_count):
+            W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
+    else:
+        raise ValueError(
+            Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
+    print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
+    return W, df
 
+
+def predict_model_ssd(Input, model_name, model_path):
+
+    infopredict_model_ssd = """
+    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
+
+    Arguments:
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
+    model_name (str): Name of the model.
+    Returns:
+    ndarray: Output from the model.
+    """
+    W, df = load_model(model_name, model_path)
 
-
-
-
-
-
-    if LogType == 'csv':
-        df = pd.read_csv(ModelPath + ModelName + '.' + LogType)
+    activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
+
+    if activation_potentiation != 'nan':
+
+        activation_potentiation = float(activation_potentiation)
 
+    else:
 
-
-
+        activation_potentiation = None
+
+    try:
+
+        scaler_params = df['STANDARD SCALER'].tolist()
+
+
+        scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
+
+        Input = standard_scaler(None, Input, scaler_params)
 
+    except:
 
-
-
-    except:
-        print(Fore.RED + "ERROR: Model Path error. Accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)
-
-    ModelName = str(df['MODEL NAME'].iloc[0])
-    Layers = df['LAYERS'].tolist()
-    LayerCount = int(df['LAYER COUNT'].iloc[0])
-    ClassCount = int(df['CLASS COUNT'].iloc[0])
-    MembranThresholds = df['MEMBRAN THRESHOLDS'].tolist()
-    MembranPotentials = df['MEMBRAN POTENTIALS'].tolist()
-    Normalizations = df['NORMALIZATION'].tolist()
-    Activations = df['ACTIVATIONS'].tolist()
-    NeuronCount = int(df['NEURON COUNT'].iloc[0])
-    SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
-    TestAcc = int(df['TEST ACCURACY'].iloc[0])
-    ModelType = str(df['MODEL TYPE'].iloc[0])
-    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
-    WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
-    ModelPath = str(df['MODEL PATH'].iloc[0])
-
-    W = [0] * LayerCount
-
-    if WeightType == 'txt':
-        for i in range(LayerCount):
-            W[i] = np.loadtxt(ModelPath + ModelName + str(i+1) + 'w.txt')
-    elif WeightType == 'npy':
-        for i in range(LayerCount):
-            W[i] = np.load(ModelPath + ModelName + str(i+1) + 'w.npy')
-    elif WeightType == 'mat':
-        for i in range(LayerCount):
-            W[i] = sio.loadmat(ModelPath + ModelName + str(i+1) + 'w.mat')
-    else:
-        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
-    print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
-    return W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations,df
-
-def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
-    infoPredictFromDİscPLAN = """
-    Function to make a prediction using a divided pruning deep learning neural network (PLAN).
+        non_scaled = True
+
 
-
-        Input (list or ndarray): Input data for the model (single vector or single matrix).
-        ModelName (str): Name of the model.
-        ModelPath (str): Path where the model is saved.
-        LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
+    layers = ['fex']
 
-    Returns:
-        ndarray: Output from the model.
-    """
-    W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations = LoadPLAN(ModelName,ModelPath,
-                                                                                       LogType)[0:6]
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
     try:
-
-
-
-        for index, Layer in enumerate(
-
-
-
-
-
-
-
-                NeuralLayer = Softmax(NeuralLayer)
-
-            if Layers[index] == 'fex':
-                NeuralLayer,useless = Fex(NeuralLayer, W[index],
-                                          MembranThresholds[index],
-                                          MembranPotentials[index])
-            if Layers[index] == 'cat':
-                NeuralLayer,useless = Cat(NeuralLayer, W[index],
-                                          MembranThresholds[index],
-                                          MembranPotentials[index],
-                                          0)
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
+
+            neural_layer = normalization(neural_layer)
+
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
+            elif Layer == 'cat':
+                neural_layer = np.dot(W[index], neural_layer)
     except:
-
-
+        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
+              infopredict_model_ssd + Style.RESET_ALL)
+        return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
-    return
+    return neural_layer
+
 
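A minimal prediction from a model saved on disk, matching the signature above (name and path are the same hypothetical placeholders used in the save sketch):

    # Hypothetical usage sketch: single-sample prediction from disk.
    out = plan.predict_model_ssd(Input=x_test[0], model_name='my_plan',
                                 model_path='C:/Users/hasan/Desktop/denemePLAN/')
    print(np.argmax(out))  # predicted class index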
934
|
+
def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
|
988
935
|
|
989
|
-
|
990
|
-
|
991
|
-
Function to make a prediction using a pruning learning artificial neural network (PLAN)
|
936
|
+
infopredict_model_ram = """
|
937
|
+
Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
|
992
938
|
from weights and parameters stored in memory.
|
993
939
|
|
994
940
|
Arguments:
|
995
941
|
Input (list or ndarray): Input data for the model (single vector or single matrix).
|
996
|
-
|
997
|
-
MembranThresholds (list): MEMBRAN THRESHOLDS.
|
998
|
-
MembranPotentials (list): MEMBRAN POTENTIALS.
|
999
|
-
DoNormalization (str): Whether to normalize ('y' or 'n').
|
1000
|
-
Activations (list): Activation functions for each layer.
|
942
|
+
scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
|
1001
943
|
W (list of ndarrays): Weights of the model.
|
944
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
1002
945
|
|
1003
946
|
Returns:
|
1004
947
|
ndarray: Output from the model.
|
1005
948
|
"""
|
1006
949
|
|
950
|
+
if scaler_params != None:
|
951
|
+
|
952
|
+
Input = standard_scaler(None, Input, scaler_params)
|
953
|
+
|
954
|
+
layers = ['fex']
|
955
|
+
|
1007
956
|
Wc = [0] * len(W)
|
1008
957
|
for i, w in enumerate(W):
|
1009
958
|
Wc[i] = np.copy(w)
|
1010
959
|
try:
|
1011
|
-
|
1012
|
-
|
1013
|
-
|
1014
|
-
for index, Layer in enumerate(
|
1015
|
-
|
1016
|
-
|
1017
|
-
|
1018
|
-
|
1019
|
-
|
1020
|
-
|
1021
|
-
|
1022
|
-
|
1023
|
-
|
1024
|
-
if Layers[index] == 'fex':
|
1025
|
-
NeuralLayer,useless = Fex(NeuralLayer, W[index],
|
1026
|
-
MembranThresholds[index],
|
1027
|
-
MembranPotentials[index])
|
1028
|
-
if Layers[index] == 'cat':
|
1029
|
-
NeuralLayer,useless = Cat(NeuralLayer, W[index],
|
1030
|
-
MembranThresholds[index],
|
1031
|
-
MembranPotentials[index],0)
|
960
|
+
neural_layer = Input
|
961
|
+
neural_layer = np.array(neural_layer)
|
962
|
+
neural_layer = neural_layer.ravel()
|
963
|
+
for index, Layer in enumerate(layers):
|
964
|
+
|
965
|
+
neural_layer = normalization(neural_layer)
|
966
|
+
|
967
|
+
if Layer == 'fex':
|
968
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
|
969
|
+
elif Layer == 'cat':
|
970
|
+
neural_layer = np.dot(W[index], neural_layer)
|
971
|
+
|
1032
972
|
except:
|
1033
|
-
print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from:
|
973
|
+
print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
|
974
|
+
infopredict_model_ram + Style.RESET_ALL)
|
1034
975
|
return 'e'
|
1035
976
|
for i, w in enumerate(Wc):
|
1036
977
|
W[i] = np.copy(w)
|
1037
|
-
return
|
1038
|
-
|
978
|
+
return neural_layer
|
979
|
+
|
1039
980
|
|
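A minimal usage sketch for the new predict_model_ram (assuming the package imports as `from plan import plan` for this layout; the weight list below is a made-up stand-in for what plan.fit would return):

    import numpy as np
    from plan import plan

    # Hypothetical single-layer weight list (3 classes x 4 features);
    # in real use W comes from a prior plan.fit(...) call.
    W = [np.random.rand(3, 4)]
    sample = np.random.rand(4)

    out = plan.predict_model_ram(sample, None, W)  # scaler_params=None: no scaling
    print(np.argmax(out))                          # index of the winning class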
-def
-
+def auto_balancer(x_train, y_train):
+
+    infoauto_balancer = """
     Function to balance the training data across different classes.

     Arguments:
-
-
-        ClassCount (int): Number of classes.
+        x_train (list): Input data for training.
+        y_train (list): Labels corresponding to the input data.

     Returns:
         tuple: A tuple containing balanced input data and labels.
     """
-
-
-
-
-
-
-
-
-
-
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
+            0] for i in range(class_count)}
+        classes = [len(ClassIndices[i]) for i in range(class_count)]
+
+        if len(set(classes)) == 1:
+            print(Fore.WHITE + "INFO: All training data is already balanced. from: auto_balancer" + Style.RESET_ALL)
+            return x_train, y_train
+
+        MinCount = min(classes)
+
         BalancedIndices = []
-        for i in range(
+        for i in range(class_count):
             if len(ClassIndices[i]) > MinCount:
-                SelectedIndices = np.random.choice(
+                SelectedIndices = np.random.choice(
+                    ClassIndices[i], MinCount, replace=False)
             else:
                 SelectedIndices = ClassIndices[i]
             BalancedIndices.extend(SelectedIndices)
+
+        BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+        BalancedLabels = [y_train[idx] for idx in BalancedIndices]
+
+        print(Fore.GREEN + "All Training Data Successfully Balanced from: " + str(len(x_train)
+              ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+    except:
+        print(Fore.RED + "ERROR: Inputs and labels must be same length, check parameters" + infoauto_balancer)
+        return 'e'
+
+    return BalancedInputs, BalancedLabels
+
+
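auto_balancer undersamples every over-represented class down to the size of the smallest one. A quick sketch (same assumed `plan` import as above; one-hot labels are required, and the data is random filler):

    import numpy as np
    from plan import plan

    x = np.random.rand(10, 3)                      # 10 samples, 3 features
    y = np.eye(2)[[0] * 7 + [1] * 3]               # unbalanced one-hot labels (7 vs 3)

    x_bal, y_bal = plan.auto_balancer(list(x), y)  # class 0 is cut down to 3 samples
    print(len(x_bal), len(y_bal))                  # 6 6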
+def synthetic_augmentation(x_train, y_train):
+    """
+    Generates synthetic examples to balance classes with fewer examples.
+
+    Arguments:
+        x -- Input dataset (examples) - list format
+        y -- Class labels (one-hot encoded) - list format
+
+    Returns:
+        x_balanced -- Balanced input dataset (NumPy array format)
+        y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    x = x_train
+    y = y_train
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    # Calculate class distribution
+    class_distribution = {i: 0 for i in range(class_count)}
+    for label in y:
+        class_distribution[np.argmax(label)] += 1
+
+    max_class_count = max(class_distribution.values())
+
+    x_balanced = list(x)
+    y_balanced = list(y)
+
+    for class_label in range(class_count):
+        class_indices = [i for i, label in enumerate(
+            y) if np.argmax(label) == class_label]
+        num_samples = len(class_indices)
+
+        if num_samples < max_class_count:
+            while num_samples < max_class_count:
+
+                random_indices = np.random.choice(
+                    class_indices, 2, replace=False)
+                sample1 = x[random_indices[0]]
+                sample2 = x[random_indices[1]]
+
+                synthetic_sample = sample1 + \
+                    (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
+                x_balanced.append(synthetic_sample.tolist())
+                y_balanced.append(y[class_indices[0]])
+
+                num_samples += 1
+
+    return np.array(x_balanced), np.array(y_balanced)
+
+
|
+
def standard_scaler(x_train, x_test, scaler_params=None):
|
1080
|
+
info_standard_scaler = """
|
1081
|
+
Standardizes training and test datasets. x_test may be None.
|
1082
|
+
|
1083
|
+
Args:
|
1084
|
+
train_data: numpy.ndarray
|
1085
|
+
Training data
|
1086
|
+
test_data: numpy.ndarray
|
1087
|
+
Test data
|
1088
|
+
|
1089
|
+
Returns:
|
1090
|
+
list:
|
1091
|
+
Scaler parameters: mean and std
|
1092
|
+
tuple
|
1093
|
+
Standardized training and test datasets
|
1094
|
+
"""
|
1095
|
+
try:
|
1096
|
+
|
1097
|
+
if scaler_params == None and x_test != None:
|
1098
|
+
|
1099
|
+
mean = np.mean(x_train, axis=0)
|
1100
|
+
std = np.std(x_train, axis=0)
|
1101
|
+
train_data_scaled = (x_train - mean) / std
|
1102
|
+
test_data_scaled = (x_test - mean) / std
|
1103
|
+
|
1104
|
+
train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
|
1105
|
+
test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
|
1106
|
+
|
1107
|
+
scaler_params = [mean, std]
|
1108
|
+
|
1109
|
+
return scaler_params, train_data_scaled, test_data_scaled
|
1069
1110
|
|
1070
|
-
|
1071
|
-
|
1111
|
+
if scaler_params == None and x_test == None:
|
1112
|
+
|
1113
|
+
mean = np.mean(x_train, axis=0)
|
1114
|
+
std = np.std(x_train, axis=0)
|
1115
|
+
train_data_scaled = (x_train - mean) / std
|
1116
|
+
|
1117
|
+
train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
|
1118
|
+
|
1119
|
+
scaler_params = [mean, std]
|
1120
|
+
|
1121
|
+
return scaler_params, train_data_scaled
|
1122
|
+
|
1123
|
+
if scaler_params != None:
|
1124
|
+
|
1125
|
+
test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
|
1126
|
+
test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
|
1127
|
+
|
1128
|
+
return test_data_scaled
|
1129
|
+
|
1130
|
+
except:
|
1131
|
+
print(
|
1132
|
+
Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
|
1133
|
+
|
1134
|
+
|
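standard_scaler has three call modes selected by its arguments: fit-and-transform both splits, fit-and-transform the training set only, or reuse saved parameters on new data. Sketch (with the `is None` checks as written above; shapes are arbitrary):

    import numpy as np
    from plan import plan

    x_train = np.random.rand(20, 5)
    x_test = np.random.rand(8, 5)

    # Fit on the training set and transform both splits:
    params, x_train_s, x_test_s = plan.standard_scaler(x_train, x_test)

    # Later, reuse the saved [mean, std] on new data (first argument is ignored):
    x_new_s = plan.standard_scaler(None, np.random.rand(2, 5), params)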
+def encode_one_hot(y_train, y_test):
+    info_one_hot_encode = """
+    Performs one-hot encoding on y_train and y_test data.
+
+    Args:
+        y_train (numpy.ndarray): Training label data.
+        y_test (numpy.ndarray): Test label data.
+
+    Returns:
+        tuple: One-hot encoded y_train and y_test data.
+    """
+    try:
+        classes = np.unique(y_train)
+        class_count = len(classes)
+
+        class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+        y_train_encoded = np.zeros((y_train.shape[0], class_count))
+        for i, label in enumerate(y_train):
+            y_train_encoded[i, class_to_index[label]] = 1
+
+        y_test_encoded = np.zeros((y_test.shape[0], class_count))
+        for i, label in enumerate(y_test):
+            y_test_encoded[i, class_to_index[label]] = 1
+    except:
+        print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+
+    return y_train_encoded, y_test_encoded
+
+
|
+
def split(X, y, test_size, random_state):
|
1166
|
+
"""
|
1167
|
+
Splits the given X (features) and y (labels) data into training and testing subsets.
|
1168
|
+
|
1169
|
+
Args:
|
1170
|
+
X (numpy.ndarray): Features data.
|
1171
|
+
y (numpy.ndarray): Labels data.
|
1172
|
+
test_size (float or int): Proportion or number of samples for the test subset.
|
1173
|
+
random_state (int or None): Seed for random state.
|
1174
|
+
|
1175
|
+
Returns:
|
1176
|
+
tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
|
1177
|
+
"""
|
1178
|
+
# Size of the dataset
|
1179
|
+
num_samples = X.shape[0]
|
1180
|
+
|
1181
|
+
if isinstance(test_size, float):
|
1182
|
+
test_size = int(test_size * num_samples)
|
1183
|
+
elif isinstance(test_size, int):
|
1184
|
+
if test_size > num_samples:
|
1185
|
+
raise ValueError(
|
1186
|
+
"test_size cannot be larger than the number of samples.")
|
1187
|
+
else:
|
1188
|
+
raise ValueError("test_size should be float or int.")
|
1189
|
+
|
1190
|
+
if random_state is not None:
|
1191
|
+
np.random.seed(random_state)
|
1192
|
+
|
1193
|
+
indices = np.arange(num_samples)
|
1194
|
+
np.random.shuffle(indices)
|
1195
|
+
|
1196
|
+
test_indices = indices[:test_size]
|
1197
|
+
train_indices = indices[test_size:]
|
1198
|
+
|
1199
|
+
x_train, x_test = X[train_indices], X[test_indices]
|
1200
|
+
y_train, y_test = y[train_indices], y[test_indices]
|
1201
|
+
|
1202
|
+
return x_train, x_test, y_train, y_test
|
1203
|
+
|
1204
|
+
|
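split mirrors sklearn's train_test_split on a smaller scale; test_size may be a fraction or an absolute count. Sketch:

    import numpy as np
    from plan import plan

    X = np.random.rand(100, 4)
    y = np.eye(3)[np.random.randint(0, 3, 100)]    # one-hot labels

    x_train, x_test, y_train, y_test = plan.split(X, y, test_size=0.2, random_state=42)
    print(x_train.shape, x_test.shape)             # (80, 4) (20, 4)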
+def metrics(y_ts, test_preds, average='weighted'):
+    """
+    Calculates precision, recall and F1 score for a classification task.
+
+    Args:
+        y_ts (list or numpy.ndarray): True labels.
+        test_preds (list or numpy.ndarray): Predicted labels.
+        average (str): Type of averaging ('micro', 'macro', 'weighted').
+
+    Returns:
+        tuple: Precision, recall, F1 score.
+    """
+    y_test_d = decode_one_hot(y_ts)
+    y_test_d = np.array(y_test_d)
+    y_pred = np.array(test_preds)
+
+    if y_test_d.ndim > 1:
+        y_test_d = y_test_d.reshape(-1)
+    if y_pred.ndim > 1:
+        y_pred = y_pred.reshape(-1)
+
+    tp = {}
+    fp = {}
+    fn = {}
+
+    classes = np.unique(np.concatenate((y_test_d, y_pred)))
+
+    for c in classes:
+        tp[c] = 0
+        fp[c] = 0
+        fn[c] = 0
+
+    for c in classes:
+        for true, pred in zip(y_test_d, y_pred):
+            if true == c and pred == c:
+                tp[c] += 1
+            elif true != c and pred == c:
+                fp[c] += 1
+            elif true == c and pred != c:
+                fn[c] += 1
+
+    precision = {}
+    recall = {}
+    f1 = {}
+
+    for c in classes:
+        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    if average == 'micro':
+        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+    elif average == 'macro':
+        precision_val = np.mean(list(precision.values()))
+        recall_val = np.mean(list(recall.values()))
+        f1_val = np.mean(list(f1.values()))
+
+    elif average == 'weighted':
+        weights = np.array([np.sum(y_test_d == c) for c in classes])
+        weights = weights / np.sum(weights)
+        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
+        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
+        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
+
+    else:
+        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+    return precision_val, recall_val, f1_val
+
+
|
+
def decode_one_hot(encoded_data):
|
1279
|
+
"""
|
1280
|
+
Decodes one-hot encoded data to original categorical labels.
|
1281
|
+
|
1282
|
+
Args:
|
1283
|
+
encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
|
1284
|
+
|
1285
|
+
Returns:
|
1286
|
+
numpy.ndarray: Decoded categorical labels with shape (n_samples,).
|
1287
|
+
"""
|
1288
|
+
|
1289
|
+
decoded_labels = np.argmax(encoded_data, axis=1)
|
1290
|
+
|
1291
|
+
return decoded_labels
|
1292
|
+
|
1293
|
+
|
1294
|
+
def roc_curve(y_true, y_score):
|
1295
|
+
"""
|
1296
|
+
Compute Receiver Operating Characteristic (ROC) curve.
|
1297
|
+
|
1298
|
+
Parameters:
|
1299
|
+
y_true : array, shape = [n_samples]
|
1300
|
+
True binary labels in range {0, 1} or {-1, 1}.
|
1301
|
+
y_score : array, shape = [n_samples]
|
1302
|
+
Target scores, can either be probability estimates of the positive class,
|
1303
|
+
confidence values, or non-thresholded measure of decisions (as returned
|
1304
|
+
by decision_function on some classifiers).
|
1305
|
+
|
1306
|
+
Returns:
|
1307
|
+
fpr : array, shape = [n]
|
1308
|
+
Increasing false positive rates such that element i is the false positive rate
|
1309
|
+
of predictions with score >= thresholds[i].
|
1310
|
+
tpr : array, shape = [n]
|
1311
|
+
Increasing true positive rates such that element i is the true positive rate
|
1312
|
+
of predictions with score >= thresholds[i].
|
1313
|
+
thresholds : array, shape = [n]
|
1314
|
+
Decreasing thresholds on the decision function used to compute fpr and tpr.
|
1315
|
+
"""
|
1316
|
+
|
1317
|
+
y_true = np.asarray(y_true)
|
1318
|
+
y_score = np.asarray(y_score)
|
1319
|
+
|
1320
|
+
if len(np.unique(y_true)) != 2:
|
1321
|
+
raise ValueError("Only binary classification is supported.")
|
1322
|
+
|
1323
|
+
|
1324
|
+
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
|
1325
|
+
y_score = y_score[desc_score_indices]
|
1326
|
+
y_true = y_true[desc_score_indices]
|
1327
|
+
|
1328
|
+
|
1329
|
+
fpr = []
|
1330
|
+
tpr = []
|
1331
|
+
thresholds = []
|
1332
|
+
n_pos = np.sum(y_true)
|
1333
|
+
n_neg = len(y_true) - n_pos
|
1334
|
+
|
1335
|
+
tp = 0
|
1336
|
+
fp = 0
|
1337
|
+
prev_score = None
|
1338
|
+
|
1339
|
+
|
1340
|
+
for i, score in enumerate(y_score):
|
1341
|
+
if score != prev_score:
|
1342
|
+
fpr.append(fp / n_neg)
|
1343
|
+
tpr.append(tp / n_pos)
|
1344
|
+
thresholds.append(score)
|
1345
|
+
prev_score = score
|
1346
|
+
|
1347
|
+
if y_true[i] == 1:
|
1348
|
+
tp += 1
|
1349
|
+
else:
|
1350
|
+
fp += 1
|
1351
|
+
|
1352
|
+
fpr.append(fp / n_neg)
|
1353
|
+
tpr.append(tp / n_pos)
|
1354
|
+
thresholds.append(score)
|
1355
|
+
|
1356
|
+
return np.array(fpr), np.array(tpr), np.array(thresholds)
|
1357
|
+
|
1358
|
+
|
1359
|
+
def confusion_matrix(y_true, y_pred, class_count):
|
1360
|
+
"""
|
1361
|
+
Computes confusion matrix.
|
1362
|
+
|
1363
|
+
Args:
|
1364
|
+
y_true (numpy.ndarray): True class labels (1D array).
|
1365
|
+
y_pred (numpy.ndarray): Predicted class labels (1D array).
|
1366
|
+
num_classes (int): Number of classes.
|
1367
|
+
|
1368
|
+
Returns:
|
1369
|
+
numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
|
1370
|
+
"""
|
1371
|
+
confusion = np.zeros((class_count, class_count), dtype=int)
|
1372
|
+
|
1373
|
+
for i in range(len(y_true)):
|
1374
|
+
true_label = y_true[i]
|
1375
|
+
pred_label = y_pred[i]
|
1376
|
+
confusion[true_label, pred_label] += 1
|
1377
|
+
|
1378
|
+
return confusion
|
1379
|
+
|
1380
|
+
|
1381
|
+
def plot_evaluate(y_test, y_preds, acc_list):
|
1382
|
+
|
1383
|
+
acc = acc_list[len(acc_list) - 1]
|
1384
|
+
y_true = decode_one_hot(y_test)
|
1385
|
+
|
1386
|
+
y_true = np.array(y_true)
|
1387
|
+
y_preds = np.array(y_preds)
|
1388
|
+
Class = np.unique(decode_one_hot(y_test))
|
1389
|
+
|
1390
|
+
precision, recall, f1 = metrics(y_test, y_preds)
|
1391
|
+
|
1392
|
+
|
1393
|
+
# Confusion matrix
|
1394
|
+
cm = confusion_matrix(y_true, y_preds, len(Class))
|
1395
|
+
# Subplot içinde düzenleme
|
1396
|
+
fig, axs = plt.subplots(2, 2, figsize=(16, 12))
|
1397
|
+
|
1398
|
+
# Confusion Matrix
|
1399
|
+
sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
|
1400
|
+
axs[0, 0].set_title("Confusion Matrix")
|
1401
|
+
axs[0, 0].set_xlabel("Predicted Class")
|
1402
|
+
axs[0, 0].set_ylabel("Actual Class")
|
1403
|
+
|
1404
|
+
if len(Class) == 2:
|
1405
|
+
fpr, tpr, thresholds = roc_curve(y_true, y_preds)
|
1406
|
+
# ROC Curve
|
1407
|
+
roc_auc = np.trapz(tpr, fpr)
|
1408
|
+
axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
|
1409
|
+
axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
|
1410
|
+
axs[1, 0].set_xlim([0.0, 1.0])
|
1411
|
+
axs[1, 0].set_ylim([0.0, 1.05])
|
1412
|
+
axs[1, 0].set_xlabel('False Positive Rate')
|
1413
|
+
axs[1, 0].set_ylabel('True Positive Rate')
|
1414
|
+
axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
|
1415
|
+
axs[1, 0].legend(loc="lower right")
|
1416
|
+
axs[1, 0].legend(loc="lower right")
|
1417
|
+
else:
|
1418
|
+
|
1419
|
+
for i in range(len(Class)):
|
1420
|
+
|
1421
|
+
y_true_copy = np.copy(y_true)
|
1422
|
+
y_preds_copy = np.copy(y_preds)
|
1072
1423
|
|
1073
|
-
|
1074
|
-
|
1075
|
-
|
1076
|
-
|
1424
|
+
y_true_copy[y_true_copy == i] = 0
|
1425
|
+
y_true_copy[y_true_copy != 0] = 1
|
1426
|
+
|
1427
|
+
y_preds_copy[y_preds_copy == i] = 0
|
1428
|
+
y_preds_copy[y_preds_copy != 0] = 1
|
1429
|
+
|
1430
|
+
|
1431
|
+
fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
|
1432
|
+
|
1433
|
+
roc_auc = np.trapz(tpr, fpr)
|
1434
|
+
axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
|
1435
|
+
axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
|
1436
|
+
axs[1, 0].set_xlim([0.0, 1.0])
|
1437
|
+
axs[1, 0].set_ylim([0.0, 1.05])
|
1438
|
+
axs[1, 0].set_xlabel('False Positive Rate')
|
1439
|
+
axs[1, 0].set_ylabel('True Positive Rate')
|
1440
|
+
axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
|
1441
|
+
axs[1, 0].legend(loc="lower right")
|
1442
|
+
axs[1, 0].legend(loc="lower right")
|
1077
1443
|
|
1078
|
-
return BalancedInputs, BalancedLabels
|
1079
|
-
|
1080
|
-
|
1081
|
-
def GetWeights():
|
1082
1444
|
|
1083
|
-
|
1084
|
-
|
1085
|
-
def GetDf():
|
1445
|
+
"""
|
1446
|
+
accuracy_per_class = []
|
1086
1447
|
|
1087
|
-
|
1088
|
-
|
1089
|
-
|
1448
|
+
for cls in Class:
|
1449
|
+
correct = np.sum((y_true == cls) & (y_preds == cls))
|
1450
|
+
total = np.sum(y_true == cls)
|
1451
|
+
accuracy_cls = correct / total if total > 0 else 0.0
|
1452
|
+
accuracy_per_class.append(accuracy_cls)
|
1090
1453
|
|
1091
|
-
|
1454
|
+
axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
|
1455
|
+
axs[2, 0].set_xlabel('Class')
|
1456
|
+
axs[2, 0].set_ylabel('Accuracy')
|
1457
|
+
axs[2, 0].set_title('Class-wise Accuracy')
|
1458
|
+
axs[2, 0].set_xticks(Class)
|
1459
|
+
axs[2, 0].grid(True)
|
1460
|
+
"""
|
1461
|
+
|
1462
|
+
|
1463
|
+
|
1464
|
+
|
1465
|
+
# Precision, Recall, F1 Score, Accuracy
|
1466
|
+
metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
|
1467
|
+
values = [precision, recall, f1, acc]
|
1468
|
+
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
|
1469
|
+
|
1470
|
+
#
|
1471
|
+
bars = axs[0, 1].bar(metric, values, color=colors)
|
1472
|
+
|
1473
|
+
|
1474
|
+
for bar, value in zip(bars, values):
|
1475
|
+
axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
|
1476
|
+
ha='center', va='bottom', fontsize=12, color='white', weight='bold')
|
1477
|
+
|
1478
|
+
axs[0, 1].set_ylim(0, 1) # Y eksenini 0 ile 1 arasında sınırla
|
1479
|
+
axs[0, 1].set_xlabel('Metrics')
|
1480
|
+
axs[0, 1].set_ylabel('Score')
|
1481
|
+
axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
|
1482
|
+
axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
|
1483
|
+
|
1484
|
+
# Accuracy
|
1485
|
+
plt.plot(acc_list, marker='o', linestyle='-',
|
1486
|
+
color='r', label='Accuracy')
|
1487
|
+
|
1488
|
+
|
1489
|
+
plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
|
1490
|
+
|
1491
|
+
|
1492
|
+
plt.xlabel('Samples')
|
1493
|
+
plt.ylabel('Accuracy')
|
1494
|
+
plt.title('Accuracy History')
|
1495
|
+
plt.legend()
|
1496
|
+
|
1092
1497
|
|
1093
|
-
|
1498
|
+
plt.tight_layout()
|
1499
|
+
plt.show()
|
1500
|
+
|
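plot_evaluate ties the pieces above together into one matplotlib figure: confusion matrix, ROC curve(s), a weighted metrics bar chart, and the accuracy history. Sketch with made-up evaluation results:

    import numpy as np
    from plan import plan

    y_test = np.eye(2)[[0, 1, 1, 0]]    # one-hot ground truth
    y_preds = np.array([0, 1, 0, 0])    # decoded predictions
    acc_list = [0.5, 0.75, 0.75]        # per-step accuracy history

    plan.plot_evaluate(y_test, y_preds, acc_list)   # opens the figure window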
+def manuel_balancer(x_train, y_train, target_samples_per_class):
+    """
+    Generates synthetic examples to balance classes to the specified number of examples per class.
+
+    Arguments:
+        x_train -- Input dataset (examples) - NumPy array format
+        y_train -- Class labels (one-hot encoded) - NumPy array format
+        target_samples_per_class -- Desired number of samples per class
+
+    Returns:
+        x_balanced -- Balanced input dataset (NumPy array format)
+        y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
+    """
+    start_time = time.time()
+    x_train = np.array(x_train)
+    y_train = np.array(y_train)
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    x_balanced = []
+    y_balanced = []
+
+    for class_label in range(class_count):
+        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
+        num_samples = len(class_indices)

+        if num_samples > target_samples_per_class:
+
+            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
+            x_balanced.append(x_train[selected_indices])
+            y_balanced.append(y_train[selected_indices])
+
+        else:
+
+            x_balanced.append(x_train[class_indices])
+            y_balanced.append(y_train[class_indices])
+
+            if num_samples < target_samples_per_class:
+
+                samples_to_add = target_samples_per_class - num_samples
+                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
+                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
+
+                for i in range(samples_to_add):
+                    uni_start_time = time.time()
+
+                    random_indices = np.random.choice(class_indices, 2, replace=False)
+                    sample1 = x_train[random_indices[0]]
+                    sample2 = x_train[random_indices[1]]
+
+
+                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
+
+                    additional_samples[i] = synthetic_sample
+                    additional_labels[i] = y_train[class_indices[0]]
+
+                    uni_end_time = time.time()
+                    calculating_est = round(
+                        (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
+
+                    if calculating_est < 60:
+                        print('\rest......(sec):', calculating_est, '\n', end="")
+
+                    elif calculating_est > 60 and calculating_est < 3600:
+                        print('\rest......(min):', calculating_est/60, '\n', end="")
+
+                    elif calculating_est > 3600:
+                        print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+                    print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
+
+                x_balanced.append(additional_samples)
+                y_balanced.append(additional_labels)
+
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
+
+    if calculating_est < 60:
+        print('Total balancing time(sec): ', calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total balancing time(min): ', calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total balancing time(h): ', calculating_est/3600)
+
+    # Stack the balanced arrays
+    x_balanced = np.vstack(x_balanced)
+    y_balanced = np.vstack(y_balanced)
+
+    return x_balanced, y_balanced
+
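manuel_balancer combines both strategies against a fixed target: classes above target_samples_per_class are randomly downsampled, classes below it are topped up with interpolated synthetic samples. Sketch:

    import numpy as np
    from plan import plan

    x = np.random.rand(10, 3)
    y = np.eye(2)[[0] * 7 + [1] * 3]

    x_bal, y_bal = plan.manuel_balancer(x, y, target_samples_per_class=5)
    print(x_bal.shape, y_bal.shape)   # (10, 3) (10, 2)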
+def get_weights():
+
+    return 0
+
+
+def get_df():
+
     return 2
+
+
+def get_preds():
+
+    return 1
+
+
+def get_acc():
+
+    return 2