pyerualjetwork 3.3.3__py3-none-any.whl → 4.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +80 -0
- pyerualjetwork/activation_functions.py +367 -0
- pyerualjetwork/data_operations.py +401 -0
- pyerualjetwork/help.py +16 -0
- pyerualjetwork/loss_functions.py +21 -0
- pyerualjetwork/metrics.py +190 -0
- pyerualjetwork/model_operations.py +350 -0
- pyerualjetwork/plan.py +645 -0
- pyerualjetwork/planeat.py +726 -0
- pyerualjetwork/ui.py +22 -0
- pyerualjetwork/visualizations.py +799 -0
- pyerualjetwork-4.0.0.dist-info/METADATA +90 -0
- pyerualjetwork-4.0.0.dist-info/RECORD +15 -0
- pyerualjetwork-4.0.0.dist-info/top_level.txt +1 -0
- plan/__init__.py +0 -5
- plan/plan.py +0 -2173
- pyerualjetwork-3.3.3.dist-info/METADATA +0 -8
- pyerualjetwork-3.3.3.dist-info/RECORD +0 -6
- pyerualjetwork-3.3.3.dist-info/top_level.txt +0 -1
- {pyerualjetwork-3.3.3.dist-info → pyerualjetwork-4.0.0.dist-info}/WHEEL +0 -0
plan/plan.py
DELETED
@@ -1,2173 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 23:32:16 2024

@author: hasan can
"""

import pandas as pd
import numpy as np
import time
from colorama import Fore, Style
from typing import List, Union
from scipy.special import expit, softmax
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from scipy.spatial import ConvexHull
from datetime import datetime
from scipy import io
import scipy.io as sio
from matplotlib.animation import ArtistAnimation
import networkx as nx
import sys

# BUILD -----
def fit(
    x_train: List[Union[int, float]],
    y_train: List[Union[int, float]],  # One hot encoded
    val=None,
    val_count=None,
    activation_potentiation=[None],  # (list): activation list for deep PLAN. default: [None] ('linear') (optional)
    x_val=None,
    y_val=None,
    show_training=None,
    visible_layer=None,  # For the future [DISABLED]
    interval=100,
    LTD=0  # LONG TERM DEPRESSION
) -> list:

    infoPLAN = """
    Creates and configures a PLAN model.

    fit Args:
        x_train (list[num]): List or numpy array of input data.
        y_train (list[num]): List or numpy array of target labels (one-hot encoded).
        val (None or True): Run validation during the training process? None or True. default: None (optional)
        val_count (None or int): After how many learned examples should an accuracy test be performed? default: 10, i.e. roughly every 10% of training progress (optional)
        activation_potentiation (list): Activation function parameters for deeper PLAN networks. For more information run: help(plan.activations_list) default: [None] (optional)
        x_val (list[num]): Validation data. default: x_train (optional)
        y_val (list[num]): Validation target labels (one-hot encoded). default: y_train (optional)
        show_training (bool, str): True or None. default: None (optional)
        visible_layer: For the future [DISABLED]
        LTD (int): Long Term Depression hyperparameter for training the PLAN neural network (optional)
        interval (float, int): Frame delay in milliseconds for the Training Report (show_training=True). This parameter affects Training Report performance; lower values are more difficult for low-end PCs (33.33 = 30 FPS, 16.67 = 60 FPS). default: 100 (optional)

    Returns:
        list([num]): Weight matrix.
    """

    fit.__doc__ = infoPLAN

    visible_layer = None

    if len(x_train) != len(y_train):
        print(Fore.RED + "ERROR301: x_train list and y_train list must be the same length. from: fit", infoPLAN + Style.RESET_ALL)
        sys.exit()

    if val == True:
        try:
            if x_val == None and y_val == None:
                x_val = x_train
                y_val = y_train
        except:
            pass

        if val_count == None:
            val_count = 10

        val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
        v_iter = 0
        val_list = []

    if show_training == True:
        G = nx.Graph()

        fig, ax = plt.subplots(2, 2)
        fig.suptitle('Train Report')

        artist1 = []
        artist2 = []
        artist3 = []
        artist4 = []

        if val != True:
            print(Fore.RED + "ERROR115: For showing training, val parameter must be True. from: fit",
                  infoPLAN + Style.RESET_ALL)
            sys.exit()

    class_count = set()
    for sublist in y_train:
        class_count.add(tuple(sublist))

    class_count = list(class_count)

    y_train = [tuple(sublist) for sublist in y_train]

    if visible_layer == None:
        layers = ['fex']
    else:
        layers = ['fex'] * visible_layer

    x_train_0 = np.array(x_train[0])
    x_train__0_vec = x_train_0.ravel()
    x_train_size = len(x_train__0_vec)

    if visible_layer == None:
        STPW = [None]
        STPW[0] = np.ones((len(class_count), x_train_size))  # STPW = SHORT TIME POTENTIATION WEIGHT
    else:
        if visible_layer == 1:
            fex_count = visible_layer
        else:
            fex_count = visible_layer - 1

        fex_neurons = [None] * fex_count
        for i in range(fex_count):
            fex_neurons[i] = [x_train_size]

        cat_neurons = [len(class_count), x_train_size]

        STPW = weight_identification(
            len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)  # STPW = SHORT TIME POTENTIATION WEIGHT

    LTPW = [0] * len(STPW)  # LTPW = LONG TIME POTENTIATION WEIGHT

    y = decode_one_hot(y_train)

    train_progress = tqdm(total=len(x_train), leave=False, desc="Training", ncols=120)

    max_w = len(STPW) - 1

    for index, inp in enumerate(x_train):

        progress = index / len(x_train) * 100

        inp = np.array(inp)
        inp = inp.ravel()

        if x_train_size != len(inp):
            print(Fore.RED + "ERROR304: All input matrices or vectors in the x_train list must be the same size. from: fit",
                  infoPLAN + Style.RESET_ALL)
            sys.exit()

        neural_layer = inp

        for Lindex, Layer in enumerate(STPW):
            STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation, index=Lindex, max_w=max_w, LTD=LTD)

        for i in range(len(STPW)):
            STPW[i] = normalization(STPW[i])

        for i, w in enumerate(STPW):
            LTPW[i] = LTPW[i] + w

        if val == True:

            if int(progress) % val_count == 1:

                validation_model = evaluate(x_val, y_val, LTPW, bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
                val_acc = validation_model[get_acc()]

                val_list.append(val_acc)

                if show_training == True:

                    mat = LTPW[0]
                    art2 = ax[0, 0].imshow(mat, interpolation='sinc', cmap='viridis')
                    suptitle_info = 'Weight Learning Progress'

                    ax[0, 0].set_title(suptitle_info)

                    artist2.append([art2])

                    artist1 = plot_decision_boundary(ax, x_val, y_val, activation_potentiation, LTPW, artist=artist1)

                    period = list(range(1, len(val_list) + 1))

                    art3 = ax[1, 1].plot(
                        period,
                        val_list,
                        linestyle='--',
                        color='g',
                        marker='o',
                        markersize=6,
                        linewidth=2,
                        label='Validation Accuracy'
                    )

                    ax[1, 1].set_title('Validation History')
                    ax[1, 1].set_xlabel('Time')
                    ax[1, 1].set_ylabel('Validation Accuracy')
                    ax[1, 1].set_ylim([0, 1])

                    artist3.append(art3)

                    for i in range(LTPW[0].shape[0]):
                        for j in range(LTPW[0].shape[1]):
                            if LTPW[0][i, j] != 0:
                                G.add_edge(f'Motor Neuron{i}', f'Sensory Neuron{j}', ltpw=LTPW[0][i, j])

                    edges = G.edges(data=True)
                    weights = [edata['ltpw'] for _, _, edata in edges]
                    pos = generate_fixed_positions(G, layout_type='circular')

                    art4_1 = nx.draw_networkx_nodes(G, pos, ax=ax[0, 1], node_size=1000, node_color='lightblue')
                    art4_2 = nx.draw_networkx_edges(G, pos, ax=ax[0, 1], edge_color=weights, edge_cmap=plt.cm.Blues)
                    art4_3 = nx.draw_networkx_labels(G, pos, ax=ax[0, 1], font_size=10, font_weight='bold')
                    ax[0, 1].set_title('Neural Web')

                    art4_list = [art4_1] + [art4_2] + list(art4_3.values())

                    artist4.append(art4_list)

                if v_iter == 0:
                    val_bar.update(val_acc)

                if v_iter != 0:
                    val_acc = val_acc - val_list[v_iter - 1]
                    val_bar.update(val_acc)

                v_iter += 1

        if visible_layer == None:
            STPW = [None]
            STPW[0] = np.ones((len(class_count), x_train_size))  # STPW = SHORT TIME POTENTIATION WEIGHT
        else:
            STPW = weight_identification(
                len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)

        train_progress.update(1)

    if show_training == True:

        mat = LTPW[0]

        for i in range(30):
            art2 = ax[0, 0].imshow(mat, interpolation='sinc', cmap='viridis')
            suptitle_info = 'Weight Learning Progress:'

            ax[0, 0].set_title(suptitle_info)

            artist2.append([art2])

        art3 = ax[1, 1].plot(
            period,
            val_list,
            linestyle='--',
            color='g',
            marker='o',
            markersize=6,
            linewidth=2,
            label='Validation Accuracy'
        )

        ax[1, 1].set_title('Validation History')
        ax[1, 1].set_xlabel('Time')
        ax[1, 1].set_ylabel('Validation Accuracy')
        ax[1, 1].set_ylim([0, 1])

        artist3.append(art3)

        for i in range(28):
            art4_1 = nx.draw_networkx_nodes(G, pos, ax=ax[0, 1], node_size=1000, node_color='lightblue')
            art4_2 = nx.draw_networkx_edges(G, pos, ax=ax[0, 1], edge_color=weights, edge_cmap=plt.cm.Blues)
            art4_3 = nx.draw_networkx_labels(G, pos, ax=ax[0, 1], font_size=10, font_weight='bold')
            ax[0, 1].set_title('Neural Web')

            art4_list = [art4_1] + [art4_2] + list(art4_3.values())

            artist4.append(art4_list)

        artist1 = plot_decision_boundary(ax, x_val, y_val, activation_potentiation, LTPW, artist=artist1, draw_is_finished=True)

        ani1 = ArtistAnimation(fig, artist1, interval=interval, blit=True)
        ani2 = ArtistAnimation(fig, artist2, interval=interval, blit=True)
        ani3 = ArtistAnimation(fig, artist3, interval=interval, blit=True)
        ani4 = ArtistAnimation(fig, artist4, interval=interval, blit=True)

        plt.show()

    LTPW = normalization(LTPW)

    return LTPW
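A minimal usage sketch for fit (not part of the original file): it assumes helpers referenced above but defined later in this module, such as decode_one_hot, are in scope, and the toy arrays are purely illustrative.

import numpy as np

x = np.random.rand(20, 4)                        # 20 samples, 4 features
y = np.eye(2)[np.random.randint(0, 2, 20)]       # one-hot labels for 2 classes

W = fit(x, y, activation_potentiation=['tanh'])  # trained weight matrix list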
# FUNCTIONS -----

def generate_fixed_positions(G, layout_type='circular'):
    pos = {}
    num_nodes = len(G.nodes())

    if layout_type == 'circular':
        angles = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False)
        radius = 10
        for i, node in enumerate(G.nodes()):
            pos[node] = (radius * np.cos(angles[i]), radius * np.sin(angles[i]))
    elif layout_type == 'grid':
        grid_size = int(np.ceil(np.sqrt(num_nodes)))
        for i, node in enumerate(G.nodes()):
            pos[node] = (i % grid_size, i // grid_size)
    else:
        raise ValueError("Unsupported layout_type. Use 'circular' or 'grid'.")

    return pos
def weight_normalization(
    W,
    class_count
) -> list:
    """
    Row (neuron) based normalization. For unbalanced models.

    Args:
        W (list(num)): Trained weight matrix list.
        class_count (int): Class count of the model.

    Returns:
        list([numpy_arrays],[...]): Post-trained weight matrices of the model.
    """

    for i in range(class_count):
        W[0][i, :] = normalization(W[0][i, :])

    return W
def weight_identification(
    layer_count,   # int: Number of layers in the neural network (unused in this version).
    class_count,   # int: Number of classes in the classification task (unused in this version).
    fex_neurons,   # list[num]: Neuron counts of the feature extraction layers.
    cat_neurons,   # list[num]: [class_count, x_train_size] for the categorization layer.
    x_train_size   # int: Size of the input data (unused in this version).
) -> list:
    """
    Identifies the weights for a neural network model.

    Args:
        layer_count (int): Number of layers in the neural network.
        class_count (int): Number of classes in the classification task.
        fex_neurons (list[num]): Neuron counts of the feature extraction layers.
        cat_neurons (list[num]): Shape of the categorization layer.
        x_train_size (int): Size of the input data.

    Returns:
        list([numpy_arrays],[...]): Pretrained weight matrices of the model.
    """

    W = [None] * (len(fex_neurons) + 1)

    for i in range(len(fex_neurons)):
        W[i] = np.ones((fex_neurons[i]))

    W[i + 1] = np.ones((cat_neurons[0], cat_neurons[1]))

    return W

# ACTIVATION FUNCTIONS -----
def spiral_activation(x):

    r = np.sqrt(np.sum(x**2))

    theta = np.arctan2(x[1:], x[:-1])

    spiral_x = r * np.cos(theta + r)
    spiral_y = r * np.sin(theta + r)

    spiral_output = np.concatenate(([spiral_x[0]], spiral_y))

    return spiral_output


def Softmax(
    x  # num: Input data to be transformed using the softmax function.
):
    """
    Applies the softmax function to the input data.

    Args:
        (num): Input data to be transformed using the softmax function.

    Returns:
        (num): Transformed data after applying the softmax function.
    """

    return softmax(x)


def Sigmoid(
    x  # num: Input data to be transformed using the sigmoid function.
):
    """
    Applies the sigmoid function to the input data.

    Args:
        (num): Input data to be transformed using the sigmoid function.

    Returns:
        (num): Transformed data after applying the sigmoid function.
    """
    return expit(x)


def Relu(
    x  # num: Input data to be transformed using the ReLU function.
):
    """
    Applies the Rectified Linear Unit (ReLU) function to the input data.

    Args:
        (num): Input data to be transformed using the ReLU function.

    Returns:
        (num): Transformed data after applying the ReLU function.
    """

    return np.maximum(0, x)


def tanh(x):
    return np.tanh(x)

def swish(x):
    return x * (1 / (1 + np.exp(-x)))

def circular_activation(x):
    return (np.sin(x) + 1) / 2

def modular_circular_activation(x, period=2*np.pi):
    return np.mod(x, period) / period

def tanh_circular_activation(x):
    return (np.tanh(x) + 1) / 2

def leaky_relu(x, alpha=0.01):
    return np.where(x > 0, x, alpha * x)

def softplus(x):
    return np.log(1 + np.exp(x))

def elu(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

def gelu(x):
    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))

def selu(x, lambda_=1.0507, alpha=1.6733):
    return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))

# 1. Sinusoids Activation (SinAkt)
def sinakt(x):
    return np.sin(x) + np.cos(x)

# 2. Parametric Squared Activation (P-Squared)
def p_squared(x, alpha=1.0, beta=0.0):
    return alpha * x**2 + beta * x

def sglu(x, alpha=1.0):
    return softmax(alpha * x) * x

# 4. Double Leaky ReLU (DLReLU)
def dlrelu(x):
    return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)

# 5. Exponential Sigmoid (ExSig)
def exsig(x):
    return 1 / (1 + np.exp(-x**2))

# 6. Adaptive Cosine Activation (ACos)
def acos(x, alpha=1.0, beta=0.0):
    return np.cos(alpha * x + beta)

# 7. Gaussian-like Activation (GLA)
def gla(x, alpha=1.0, mu=0.0):
    return np.exp(-alpha * (x - mu)**2)

# 8. Swish ReLU (SReLU)
def srelu(x):
    return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)

# 9. Quadratic Exponential Linear Unit (QELU)
def qelu(x):
    return x**2 * np.exp(x) - 1

# 10. Inverse Square Root Activation (ISRA)
def isra(x):
    return x / np.sqrt(np.abs(x) + 1)

def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
    return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)

def arctan(x):
    return np.arctan(x)

def bent_identity(x):
    return (np.sqrt(x**2 + 1) - 1) / 2 + x

def sech(x):
    return 2 / (np.exp(x) + np.exp(-x))

def softsign(x):
    return x / (1 + np.abs(x))

def pwl(x, alpha=0.5, beta=1.5):
    return np.where(x <= 0, alpha * x, beta * x)

def cubic(x):
    return x**3

def gaussian(x, alpha=1.0, mu=0.0):
    return np.exp(-alpha * (x - mu)**2)

def sine(x, alpha=1.0):
    return np.sin(alpha * x)

def tanh_square(x):
    return np.tanh(x)**2

def mod_sigmoid(x, alpha=1.0, beta=0.0):
    return 1 / (1 + np.exp(-alpha * x + beta))

def quartic(x):
    return x**4

def square_quartic(x):
    return (x**2)**2

def cubic_quadratic(x):
    return x**3 * (x**2)

def exp_cubic(x):
    return np.exp(x**3)

def sine_square(x):
    return np.sin(x)**2

def logarithmic(x):
    return np.log(x**2 + 1)

def scaled_cubic(x, alpha=1.0):
    return alpha * x**3

def sine_offset(x, beta=0.0):
    return np.sin(x + beta)

def activations_list():
    """
    spiral,
    sigmoid,
    relu,
    tanh,  (good for general datasets)
    swish,
    circular,
    mod_circular,
    tanh_circular,
    leaky_relu,
    softplus,
    elu,
    gelu,
    selu,
    sinakt,
    p_squared,
    sglu,
    dlrelu,
    exsig,
    acos,
    gla,
    srelu,
    qelu,
    isra,
    waveakt,
    arctan,
    bent_identity,  (good for image datasets)
    sech,
    softsign,
    pwl,
    cubic,
    gaussian,
    sine,
    tanh_square,
    mod_sigmoid,
    quartic,
    square_quartic,
    cubic_quadratic,
    exp_cubic,
    sine_square,
    logarithmic,
    scaled_cubic,
    sine_offset
    """
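fex (below) applies every activation named in activation_potentiation to the same input and sums the results. A short sketch of that composition, using the functions defined above; the input vector is illustrative.

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
out = np.zeros(len(x))
for f in (tanh, swish, Relu):   # mirrors activation_potentiation=['tanh', 'swish', 'relu']
    out += f(x)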
def fex(
    Input,                    # list[num]: Input data.
    w,                        # num: Weight matrix of the neural network.
    is_training,              # bool: Flag indicating if the function is called during training (True or False).
    Class,                    # int: Which class, if training.
    activation_potentiation,  # (list): Activation potentiation list for deep PLAN. (optional)
    index,
    max_w,
    LTD=0
) -> tuple:
    """
    Applies the feature extraction process to the input data using synaptic potentiation.

    Args:
        Input (num): Input data.
        w (num): Weight matrix of the neural network.
        is_training (bool): Flag indicating if the function is called during training (True or False).
        Class (int): If called during training, the class (label) of the current sample; otherwise None.
        activation_potentiation (list): Activation list for deep PLAN. default: [None] ('linear') (optional)

    Returns:
        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
        or
        num: neural network output
    """

    Output = np.zeros(len(Input))

    for activation in activation_potentiation:

        if activation == 'sigmoid':
            Output += Sigmoid(Input)

        elif activation == 'swish':
            Output += swish(Input)

        elif activation == 'circular':
            Output += circular_activation(Input)

        elif activation == 'mod_circular':
            Output += modular_circular_activation(Input)

        elif activation == 'tanh_circular':
            Output += tanh_circular_activation(Input)

        elif activation == 'leaky_relu':
            Output += leaky_relu(Input)

        elif activation == 'relu':
            Output += Relu(Input)

        elif activation == 'softplus':
            Output += softplus(Input)

        elif activation == 'elu':
            Output += elu(Input)

        elif activation == 'gelu':
            Output += gelu(Input)

        elif activation == 'selu':
            Output += selu(Input)

        elif activation == 'softmax':
            Output += Softmax(Input)

        elif activation == 'tanh':
            Output += tanh(Input)

        elif activation == 'sinakt':
            Output += sinakt(Input)

        elif activation == 'p_squared':
            Output += p_squared(Input)

        elif activation == 'sglu':
            Output += sglu(Input, alpha=1.0)

        elif activation == 'dlrelu':
            Output += dlrelu(Input)

        elif activation == 'exsig':
            Output += exsig(Input)

        elif activation == 'acos':
            Output += acos(Input, alpha=1.0, beta=0.0)

        elif activation == 'gla':
            Output += gla(Input, alpha=1.0, mu=0.0)

        elif activation == 'srelu':
            Output += srelu(Input)

        elif activation == 'qelu':
            Output += qelu(Input)

        elif activation == 'isra':
            Output += isra(Input)

        elif activation == 'waveakt':
            Output += waveakt(Input)

        elif activation == 'arctan':
            Output += arctan(Input)

        elif activation == 'bent_identity':
            Output += bent_identity(Input)

        elif activation == 'sech':
            Output += sech(Input)

        elif activation == 'softsign':
            Output += softsign(Input)

        elif activation == 'pwl':
            Output += pwl(Input)

        elif activation == 'cubic':
            Output += cubic(Input)

        elif activation == 'gaussian':
            Output += gaussian(Input)

        elif activation == 'sine':
            Output += sine(Input)

        elif activation == 'tanh_square':
            Output += tanh_square(Input)

        elif activation == 'mod_sigmoid':
            Output += mod_sigmoid(Input)

        elif activation == None or activation == 'linear':
            Output += Input

        elif activation == 'quartic':
            Output += quartic(Input)

        elif activation == 'square_quartic':
            Output += square_quartic(Input)

        elif activation == 'cubic_quadratic':
            Output += cubic_quadratic(Input)

        elif activation == 'exp_cubic':
            Output += exp_cubic(Input)

        elif activation == 'sine_square':
            Output += sine_square(Input)

        elif activation == 'logarithmic':
            Output += logarithmic(Input)

        elif activation == 'scaled_cubic':
            Output += scaled_cubic(Input, 1.0)

        elif activation == 'sine_offset':
            Output += sine_offset(Input, 1.0)

        elif activation == 'spiral':
            Output += spiral_activation(Input)

        else:
            print(Fore.RED + 'ERROR120: "' + activation + '" is not available. For the list of available activation functions run: help(plan.activations_list)' + Style.RESET_ALL)
            sys.exit()

    Input = Output

    if is_training == True:

        for i in range(LTD):
            depression_vector = np.random.rand(*Input.shape)
            Input -= depression_vector

        w[Class, :] = Input
        return w

    elif is_training == False and max_w != 0:
        # Multi-layer inference: elementwise pass for hidden layers,
        # dot product only at the final layer. This branch is checked before
        # the plain inference branch below so that it is reachable.
        if index == max_w:
            neural_layer = np.dot(w, Input)
            return neural_layer
        else:
            neural_layer = [None] * len(w)
            for i in range(len(w)):
                neural_layer[i] = Input[i] * w[i]
            neural_layer = np.array(neural_layer)
            return neural_layer

    elif is_training == False:
        # Single-layer inference.
        neural_layer = np.dot(w, Input)
        return neural_layer
def normalization(
    Input  # num: Input data to be normalized.
):
    """
    Normalizes the input data using maximum absolute scaling.

    Args:
        Input (num): Input data to be normalized.

    Returns:
        (num): Scaled input data after normalization.
    """

    MaxAbs = np.max(np.abs(Input))  # compute the maximum absolute value directly
    return Input / MaxAbs           # return the normalized input
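Max-abs scaling divides by the largest absolute entry, so the output always lies in [-1, 1]. A quick check with an illustrative vector:

import numpy as np

normalization(np.array([2.0, -4.0, 1.0]))   # -> array([ 0.5 , -1.  ,  0.25])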
def evaluate(
    x_test,  # list[num]: Test input data.
    y_test,  # list[num]: Test labels.
    W,       # list[num]: Weight matrix list of the neural network.
    activation_potentiation=[None],  # (list): Activation potentiation list for deep PLAN. (optional)
    bar_status=True,    # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
    show_metrices=None  # show_metrices (bool): (True or None) (optional) Default: None
) -> tuple:
    infoTestModel = """
    Tests the neural network model with the given test data.

    Args:
        x_test (list[num]): Test input data.
        y_test (list[num]): Test labels.
        W (list[num]): Weight matrix list of the neural network.
        activation_potentiation (list): Activation function parameters for deeper PLAN networks. For more information run: help(plan.activations_list) default: [None]
        bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
        show_metrices (bool): (True or None) (optional) Default: None

    Returns:
        tuple: A tuple containing the predicted labels and the accuracy of the model.
    """
    evaluate.__doc__ = infoTestModel

    predict_probabilitys = []
    real_classes = []
    predict_classes = []

    layer_count = len(W)

    try:
        layers = ['fex'] * layer_count

        Wc = [0] * len(W)  # Wc = Weight copy
        true = 0
        y_preds = []
        acc_list = []
        max_w = len(W) - 1

        for i, w in enumerate(W):
            Wc[i] = np.copy(w)

        if bar_status == True:
            test_progress = tqdm(total=len(x_test), leave=False, desc='Testing', ncols=120)
            acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)

        for inpIndex, Input in enumerate(x_test):
            Input = np.array(Input)
            Input = Input.ravel()
            neural_layer = Input

            for index, Layer in enumerate(W):
                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation, index=index, max_w=max_w)

            for i, w in enumerate(Wc):
                W[i] = np.copy(w)

            neural_layer = Softmax(neural_layer)

            max_value = max(neural_layer)
            predict_probabilitys.append(max_value)

            RealOutput = np.argmax(y_test[inpIndex])
            real_classes.append(RealOutput)
            PredictedOutput = np.argmax(neural_layer)
            predict_classes.append(PredictedOutput)

            if RealOutput == PredictedOutput:
                true += 1
            acc = true / len(y_test)

            acc_list.append(acc)
            y_preds.append(PredictedOutput)

            if bar_status == True:
                test_progress.update(1)
                if inpIndex == 0:
                    acc_bar.update(acc)
                else:
                    acc = acc - acc_list[inpIndex - 1]
                    acc_bar.update(acc)

        if show_metrices == True:
            plot_evaluate(x_test, y_test, y_preds, acc_list, W=W, activation_potentiation=activation_potentiation)

        for i, w in enumerate(Wc):
            W[i] = np.copy(w)

    except:
        print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)
        sys.exit()

    return W, y_preds, acc
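A usage sketch for evaluate, assuming W came from fit above with the same activation list; the test arrays are illustrative.

import numpy as np

x_test = np.random.rand(10, 4)
y_test = np.eye(2)[np.random.randint(0, 2, 10)]

W, y_preds, acc = evaluate(x_test, y_test, W, activation_potentiation=['tanh'])
print('test accuracy:', acc)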
def multiple_evaluate(
    x_test,         # list[num]: Test input data.
    y_test,         # list[num]: Test labels.
    show_metrices,  # show_metrices (bool): Visualize test progress? (True or False)
    MW,             # list[list[num]]: Weight matrices of the neural networks.
    activation_potentiation=None  # (float or None): Threshold value for comparison. (optional)
) -> tuple:
    infoTestModel = """
    Tests the neural network model with the given test data.

    Args:
        x_test (list[num]): Test input data.
        y_test (list[num]): Test labels.
        show_metrices (bool): (True or False)
        MW (list(list[num])): Multiple weight matrix list of the neural network. (Multiple model testing)

    Returns:
        tuple: A tuple containing the predicted labels and the accuracy of the model.
    """

    layers = ['fex', 'cat']

    try:
        y_preds = [-1] * len(y_test)
        acc_list = []
        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
        start_time = time.time()
        true = 0
        for inpIndex, Input in enumerate(x_test):

            output_layer = 0

            for m, Model in enumerate(MW):

                W = Model

                Wc = [0] * len(W)  # Wc = weight copy

                y_preds = [None] * len(y_test)
                for i, w in enumerate(W):
                    Wc[i] = np.copy(w)

                Input = np.array(Input)
                Input = Input.ravel()
                uni_start_time = time.time()
                neural_layer = Input

                for index, Layer in enumerate(layers):

                    neural_layer = normalization(neural_layer)

                    if Layer == 'fex':
                        neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)

                output_layer += neural_layer

                for i, w in enumerate(Wc):
                    W[i] = np.copy(w)
            for i, w in enumerate(Wc):
                W[i] = np.copy(w)
            RealOutput = np.argmax(y_test[inpIndex])
            PredictedOutput = np.argmax(output_layer)
            if RealOutput == PredictedOutput:
                true += 1
            acc = true / len(y_test)
            if show_metrices == True:
                acc_list.append(acc)
            y_preds[inpIndex] = PredictedOutput

            uni_end_time = time.time()

            calculating_est = round(
                (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)

            if calculating_est < 60:
                print('\rest......(sec):', calculating_est, '\n', end="")
                print('\rTest accuracy: ', acc, "\n", end="")

            elif calculating_est > 60 and calculating_est < 3600:
                print('\rest......(min):', calculating_est/60, '\n', end="")
                print('\rTest accuracy: ', acc, "\n", end="")

            elif calculating_est > 3600:
                print('\rest......(h):', calculating_est/3600, '\n', end="")
                print('\rTest accuracy: ', acc, "\n", end="")
        if show_metrices == True:
            plot_evaluate(y_test, y_preds, acc_list)

        EndTime = time.time()
        for i, w in enumerate(Wc):
            W[i] = np.copy(w)

        calculating_est = round(EndTime - start_time, 2)

        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")

        if calculating_est < 60:
            print('Total testing time(sec): ', calculating_est)

        elif calculating_est > 60 and calculating_est < 3600:
            print('Total testing time(min): ', calculating_est/60)

        elif calculating_est > 3600:
            print('Total testing time(h): ', calculating_est/3600)

        if acc >= 0.8:
            print(Fore.GREEN + '\nTotal Test accuracy: ',
                  acc, '\n' + Style.RESET_ALL)

        elif acc < 0.8 and acc > 0.6:
            print(Fore.MAGENTA + '\nTotal Test accuracy: ',
                  acc, '\n' + Style.RESET_ALL)

        elif acc <= 0.6:
            print(Fore.RED + '\nTotal Test accuracy: ',
                  acc, '\n' + Style.RESET_ALL)

    except:
        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be the same as the trained model's. Check the parameters. Are you sure the weights are loaded? from: evaluate" + infoTestModel + Style.RESET_ALL)
        sys.exit()

    return W, y_preds, acc
def save_model(model_name,
               model_type,
               test_acc,
               weights_type,
               weights_format,
               model_path,
               W,
               scaler_params=None,
               activation_potentiation=[None]
               ):

    infosave_model = """
    Function to save a potentiation learning model.

    Arguments:
        model_name (str): Name of the model.
        model_type (str): Type of the model. (options: PLAN)
        test_acc (float): Test accuracy of the model.
        weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
        weights_format (str): Format of the weights (options: 'd', 'f', 'raw').
        model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
        scaler_params (list[num, num]): Standard scaler parameter list: mean, std. If the standard scaler was not used, pass None.
        W: Weights of the model.
        activation_potentiation (list): Activation function parameters for deeper PLAN networks. For more information run: help(plan.activations_list) default: [None]

    Returns:
        str: Message indicating if the model was saved successfully or encountered an error.
    """

    save_model.__doc__ = infosave_model

    class_count = W[0].shape[0]

    layers = ['fex']

    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
        print(Fore.RED + "ERROR110: Save weight type (file extension) must be 'txt', 'npy' or 'mat' from: save_model" +
              infosave_model + Style.RESET_ALL)
        sys.exit()

    if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
        print(Fore.RED + "ERROR111: Weight format must be 'd', 'f' or 'raw' from: save_model" +
              infosave_model + Style.RESET_ALL)
        sys.exit()

    NeuronCount = 0
    SynapseCount = 0

    try:
        for w in W:
            NeuronCount += np.shape(w)[0] + np.shape(w)[1]
            SynapseCount += np.shape(w)[0] * np.shape(w)[1]
    except:
        print(Fore.RED + "ERROR: The weight matrices have a problem from: save_model" +
              infosave_model + Style.RESET_ALL)
        sys.exit()

    if scaler_params != None:
        if len(scaler_params) > len(activation_potentiation):
            activation_potentiation += ['']
        elif len(activation_potentiation) > len(scaler_params):
            for i in range(len(activation_potentiation) - len(scaler_params)):
                scaler_params.append(' ')

    data = {'MODEL NAME': model_name,
            'MODEL TYPE': model_type,
            'LAYER COUNT': len(layers),
            'CLASS COUNT': class_count,
            'NEURON COUNT': NeuronCount,
            'SYNAPSE COUNT': SynapseCount,
            'TEST ACCURACY': float(test_acc),
            'SAVE DATE': datetime.now(),
            'WEIGHTS TYPE': weights_type,
            'WEIGHTS FORMAT': weights_format,
            'MODEL PATH': model_path,
            'STANDARD SCALER': scaler_params,
            'ACTIVATION POTENTIATION': activation_potentiation
            }
    try:
        df = pd.DataFrame(data)
        df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
    except:
        print(Fore.RED + "ERROR: Model log not saved, probably model_path is incorrect. Check the log parameters from: save_model" +
              infosave_model + Style.RESET_ALL)
        sys.exit()

    try:
        if weights_type == 'txt' and weights_format == 'd':
            for i, w in enumerate(W):
                np.savetxt(model_path + model_name + '_weights.txt', w, fmt='%d')

        if weights_type == 'txt' and weights_format == 'f':
            for i, w in enumerate(W):
                np.savetxt(model_path + model_name + '_weights.txt', w, fmt='%f')

        if weights_type == 'txt' and weights_format == 'raw':
            for i, w in enumerate(W):
                np.savetxt(model_path + model_name + '_weights.txt', w)

        ###

        if weights_type == 'npy' and weights_format == 'd':
            for i, w in enumerate(W):
                np.save(model_path + model_name + '_weights.npy', w.astype(int))

        if weights_type == 'npy' and weights_format == 'f':
            for i, w in enumerate(W):
                np.save(model_path + model_name + '_weights.npy', w.astype(float))

        if weights_type == 'npy' and weights_format == 'raw':
            for i, w in enumerate(W):
                np.save(model_path + model_name + '_weights.npy', w)

        ###

        if weights_type == 'mat' and weights_format == 'd':
            for i, w in enumerate(W):
                w = {'w': w.astype(int)}
                io.savemat(model_path + model_name + '_weights.mat', w)

        if weights_type == 'mat' and weights_format == 'f':
            for i, w in enumerate(W):
                w = {'w': w.astype(float)}
                io.savemat(model_path + model_name + '_weights.mat', w)

        if weights_type == 'mat' and weights_format == 'raw':
            for i, w in enumerate(W):
                w = {'w': w}
                io.savemat(model_path + model_name + '_weights.mat', w)

    except:
        print(Fore.RED + "ERROR: Model weights not saved. Check the weight parameters. SaveFilePath example: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
        sys.exit()
    print(df)
    message = (
        Fore.GREEN + "Model Saved Successfully\n" +
        Fore.MAGENTA + "Don't forget: if you want to load the model, the model log file and weight files must be in the same directory." +
        Style.RESET_ALL
    )

    return print(message)
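A save/load round-trip sketch using save_model and load_model (defined just below); the model name is a placeholder and an empty model_path writes to the current directory.

save_model(model_name='my_plan', model_type='PLAN', test_acc=acc,
           weights_type='npy', weights_format='raw', model_path='',
           W=W, activation_potentiation=['tanh'])

W_loaded, df = load_model('my_plan', '')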
def load_model(model_name,
               model_path,
               ):
    infoload_model = """
    Function to load a potentiation learning model.

    Arguments:
        model_name (str): Name of the model.
        model_path (str): Path where the model is saved.

    Returns:
        lists: W(list[num]), activation_potentiation, DataFrame of the model
    """

    load_model.__doc__ = infoload_model

    try:
        df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
    except:
        print(Fore.RED + "ERROR: Model path error. Acceptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
              infoload_model + Style.RESET_ALL)

    model_name = str(df['MODEL NAME'].iloc[0])
    layer_count = int(df['LAYER COUNT'].iloc[0])
    WeightType = str(df['WEIGHTS TYPE'].iloc[0])

    W = [0] * layer_count

    if WeightType == 'txt':
        for i in range(layer_count):
            W[i] = np.loadtxt(model_path + model_name + '_weights.txt')
    elif WeightType == 'npy':
        for i in range(layer_count):
            W[i] = np.load(model_path + model_name + '_weights.npy')
    elif WeightType == 'mat':
        for i in range(layer_count):
            W[i] = sio.loadmat(model_path + model_name + '_weights.mat')
    else:
        raise ValueError(
            Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
    print(Fore.GREEN + "Model loaded successfully" + Style.RESET_ALL)
    return W, df
def predict_model_ssd(Input, model_name, model_path):

    infopredict_model_ssd = """
    Function to make a prediction using a saved potentiation learning artificial neural network (PLAN).

    Arguments:
        Input (list or ndarray): Input data for the model (single vector or single matrix).
        model_name (str): Name of the model.
    Returns:
        ndarray: Output from the model.
    """

    predict_model_ssd.__doc__ = infopredict_model_ssd

    W, df = load_model(model_name, model_path)

    activation_potentiation = list(df['ACTIVATION POTENTIATION'])

    scaler_params = df['STANDARD SCALER'].tolist()

    scaler_params = [item for item in scaler_params if item != ' ']

    try:
        scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
        Input = standard_scaler(None, Input, scaler_params)
    except:
        pass

    layers = ['fex']

    Wc = [0] * len(W)
    for i, w in enumerate(W):
        Wc[i] = np.copy(w)
    try:
        neural_layer = Input
        neural_layer = np.array(neural_layer)
        neural_layer = neural_layer.ravel()
        max_w = len(W) - 1
        for index, Layer in enumerate(W):
            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation, index=index, max_w=max_w)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
              infopredict_model_ssd + Style.RESET_ALL)
        sys.exit()
    for i, w in enumerate(Wc):
        W[i] = np.copy(w)
    return neural_layer
def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[None]):

    infopredict_model_ram = """
    Function to make a prediction using a potentiation learning artificial neural network (PLAN)
    from weights and parameters stored in memory.

    Arguments:
        Input (list or ndarray): Input data for the model (single vector or single matrix).
        W (list of ndarrays): Weights of the model.
        scaler_params (list): Standard scaler parameter list: mean, std. (optional) Default: None.
        activation_potentiation (list): Activation list for deep PLAN. default: [None] ('linear') (optional)

    Returns:
        ndarray: Output from the model.
    """

    predict_model_ram.__doc__ = infopredict_model_ram

    try:
        if scaler_params != None:
            Input = standard_scaler(None, Input, scaler_params)
    except:
        # The `!= None` comparison can raise for array-like scaler_params;
        # in that case the params are present, so scale anyway.
        Input = standard_scaler(None, Input, scaler_params)

    layers = ['fex']

    Wc = [0] * len(W)
    for i, w in enumerate(W):
        Wc[i] = np.copy(w)
    try:
        neural_layer = Input
        neural_layer = np.array(neural_layer)
        neural_layer = neural_layer.ravel()

        max_w = len(W) - 1

        for index, Layer in enumerate(W):
            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation, index=index, max_w=max_w)

        for i, w in enumerate(Wc):
            W[i] = np.copy(w)
        return neural_layer

    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
              infopredict_model_ram + Style.RESET_ALL)
        sys.exit()
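An inference sketch with predict_model_ram; applying Softmax and argmax to the raw output mirrors what evaluate does. The sample vector is illustrative.

import numpy as np

sample = np.random.rand(4)
out = predict_model_ram(sample, W, activation_potentiation=['tanh'])
predicted_class = np.argmax(Softmax(out))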
def auto_balancer(x_train, y_train):

    infoauto_balancer = """
    Function to balance the training data across different classes.

    Arguments:
        x_train (list): Input data for training.
        y_train (list): Labels corresponding to the input data.

    Returns:
        tuple: A tuple containing balanced input data and labels.
    """

    auto_balancer.__doc__ = infoauto_balancer

    classes = np.arange(y_train.shape[1])
    class_count = len(classes)

    try:
        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
            0] for i in range(class_count)}
        classes = [len(ClassIndices[i]) for i in range(class_count)]

        if len(set(classes)) == 1:
            print(Fore.WHITE + "INFO: Data is already balanced. from: auto_balancer" + Style.RESET_ALL)
            return x_train, y_train

        MinCount = min(classes)

        BalancedIndices = []
        for i in tqdm(range(class_count), leave=False, desc='Balancing Data', ncols=120):
            if len(ClassIndices[i]) > MinCount:
                SelectedIndices = np.random.choice(
                    ClassIndices[i], MinCount, replace=False)
            else:
                SelectedIndices = ClassIndices[i]
            BalancedIndices.extend(SelectedIndices)

        BalancedInputs = [x_train[idx] for idx in BalancedIndices]
        BalancedLabels = [y_train[idx] for idx in BalancedIndices]

        print(Fore.GREEN + "Data successfully balanced from: " + str(len(x_train)
              ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
    except:
        print(Fore.RED + "ERROR: Inputs and labels must be the same length. Check the parameters." + infoauto_balancer)
        sys.exit()

    return np.array(BalancedInputs), np.array(BalancedLabels)
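A usage sketch for auto_balancer: it undersamples every class down to the size of the smallest one. The arrays (one-hot, 2-D labels) are illustrative.

import numpy as np

x = np.random.rand(30, 4)
y = np.eye(2)[np.random.randint(0, 2, 30)]

x_bal, y_bal = auto_balancer(x, y)   # each class reduced to the minority count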
def synthetic_augmentation(x_train, y_train):
    """
    Generates synthetic examples to balance classes with fewer examples.

    Arguments:
        x_train -- Input dataset (examples) - list format
        y_train -- Class labels (one-hot encoded) - list format

    Returns:
        x_balanced -- Balanced input dataset (list format)
        y_balanced -- Balanced class labels (one-hot encoded, list format)
    """
    x = x_train
    y = y_train
    classes = np.arange(y_train.shape[1])
    class_count = len(classes)

    class_distribution = {i: 0 for i in range(class_count)}
    for label in y:
        class_distribution[np.argmax(label)] += 1

    max_class_count = max(class_distribution.values())

    x_balanced = list(x)
    y_balanced = list(y)

    for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data', ncols=120):
        class_indices = [i for i, label in enumerate(
            y) if np.argmax(label) == class_label]
        num_samples = len(class_indices)

        if num_samples < max_class_count:
            while num_samples < max_class_count:

                random_indices = np.random.choice(
                    class_indices, 2, replace=False)
                sample1 = x[random_indices[0]]
                sample2 = x[random_indices[1]]

                synthetic_sample = sample1 + \
                    (np.array(sample2) - np.array(sample1)) * np.random.rand()

                x_balanced.append(synthetic_sample.tolist())
                y_balanced.append(y[class_indices[0]])

                num_samples += 1

    return np.array(x_balanced), np.array(y_balanced)
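The counterpart sketch for synthetic_augmentation: minority classes are oversampled by interpolating between two random same-class samples, new = s1 + (s2 - s1) * U(0, 1). Same illustrative arrays as the auto_balancer sketch above.

x_aug, y_aug = synthetic_augmentation(x, y)   # each class grown to the majority count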
def standard_scaler(x_train=None, x_test=None, scaler_params=None):
    info_standard_scaler = """
    Standardizes training and test datasets. x_test may be None.

    Args:
        x_train: numpy.ndarray
            Training data
        x_test: numpy.ndarray
            Test data (optional)
        scaler_params: list
            Precomputed scaler parameters: mean and std (optional)

    Returns:
        list:
            Scaler parameters: mean and std
        tuple:
            Standardized training and test datasets
    """

    standard_scaler.__doc__ = info_standard_scaler

    try:
        x_train = x_train.tolist()
        x_test = x_test.tolist()
    except:
        pass

    try:
        if scaler_params == None and x_test != None:

            mean = np.mean(x_train, axis=0)
            std = np.std(x_train, axis=0)

            train_data_scaled = (x_train - mean) / std
            test_data_scaled = (x_test - mean) / std

            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)

            scaler_params = [mean, std]

            return scaler_params, train_data_scaled, test_data_scaled

        if scaler_params == None and x_test == None:

            mean = np.mean(x_train, axis=0)
            std = np.std(x_train, axis=0)
            train_data_scaled = (x_train - mean) / std

            train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)

            scaler_params = [mean, std]

            return scaler_params, train_data_scaled

        if scaler_params != None:

            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)

            return test_data_scaled

    except:
        print(
            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)
        sys.exit()
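A usage sketch for the three standard_scaler call patterns; the arrays are illustrative. The returned scaler_params (mean, std) are what save_model stores and predict_model_ssd re-applies at inference.

import numpy as np

x_tr, x_te = np.random.rand(20, 4), np.random.rand(5, 4)

scaler_params, x_tr_s, x_te_s = standard_scaler(x_tr, x_te)             # fit on train, scale both
x_new_s = standard_scaler(None, np.random.rand(3, 4), scaler_params)    # reuse saved params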
def encode_one_hot(y_train, y_test):
    """
    Performs one-hot encoding on the y_train and y_test data.

    Args:
        y_train (numpy.ndarray): Training label data.
        y_test (numpy.ndarray): Test label data.

    Returns:
        tuple: One-hot encoded y_train and y_test data.
    """
    classes = np.unique(y_train)
    class_count = len(classes)

    class_to_index = {cls: idx for idx, cls in enumerate(classes)}

    y_train_encoded = np.zeros((y_train.shape[0], class_count))
    for i, label in enumerate(y_train):
        y_train_encoded[i, class_to_index[label]] = 1

    y_test_encoded = np.zeros((y_test.shape[0], class_count))
    for i, label in enumerate(y_test):
        y_test_encoded[i, class_to_index[label]] = 1

    return y_train_encoded, y_test_encoded
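A worked example for encode_one_hot; note that the class-to-index mapping is built from y_train alone.

import numpy as np

y_tr_oh, y_te_oh = encode_one_hot(np.array([0, 1, 1, 0]), np.array([1, 0]))
# y_tr_oh -> [[1,0], [0,1], [0,1], [1,0]] ; y_te_oh -> [[0,1], [1,0]]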
def split(X, y, test_size, random_state):
    """
    Splits the given X (features) and y (labels) data into training and testing subsets.

    Args:
        X (numpy.ndarray): Features data.
        y (numpy.ndarray): Labels data.
        test_size (float or int): Proportion or number of samples for the test subset.
        random_state (int or None): Seed for random state.

    Returns:
        tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
    """
    num_samples = X.shape[0]

    if isinstance(test_size, float):
        test_size = int(test_size * num_samples)
    elif isinstance(test_size, int):
        if test_size > num_samples:
            raise ValueError(
                "test_size cannot be larger than the number of samples.")
    else:
        raise ValueError("test_size should be float or int.")

    if random_state is not None:
        np.random.seed(random_state)

    indices = np.arange(num_samples)
    np.random.shuffle(indices)

    test_indices = indices[:test_size]
    train_indices = indices[test_size:]

    x_train, x_test = X[train_indices], X[test_indices]
    y_train, y_test = y[train_indices], y[test_indices]

    return x_train, x_test, y_train, y_test

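A short sketch of split; a float test_size is converted to a sample count before shuffling:

import numpy as np

X = np.random.rand(10, 3)
y = np.arange(10)

x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
# 8 training rows and 2 test rows; a fixed seed reproduces the same shuffle.
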
def metrics(y_ts, test_preds, average='weighted'):
    """
    Calculates precision, recall and F1 score for a classification task.

    Args:
        y_ts (list or numpy.ndarray): True labels (one-hot encoded; decoded internally).
        test_preds (list or numpy.ndarray): Predicted labels.
        average (str): Type of averaging ('micro', 'macro', 'weighted').

    Returns:
        tuple: Precision, recall, F1 score.
    """
    y_test_d = decode_one_hot(y_ts)
    y_test_d = np.array(y_test_d)
    y_pred = np.array(test_preds)

    if y_test_d.ndim > 1:
        y_test_d = y_test_d.reshape(-1)
    if y_pred.ndim > 1:
        y_pred = y_pred.reshape(-1)

    tp = {}
    fp = {}
    fn = {}

    classes = np.unique(np.concatenate((y_test_d, y_pred)))

    for c in classes:
        tp[c] = 0
        fp[c] = 0
        fn[c] = 0

    # Count true positives, false positives and false negatives per class.
    for c in classes:
        for true, pred in zip(y_test_d, y_pred):
            if true == c and pred == c:
                tp[c] += 1
            elif true != c and pred == c:
                fp[c] += 1
            elif true == c and pred != c:
                fn[c] += 1

    precision = {}
    recall = {}
    f1 = {}

    for c in classes:
        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0

    if average == 'micro':
        precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
        recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
        f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0

    elif average == 'macro':
        precision_val = np.mean(list(precision.values()))
        recall_val = np.mean(list(recall.values()))
        f1_val = np.mean(list(f1.values()))

    elif average == 'weighted':
        weights = np.array([np.sum(y_test_d == c) for c in classes])
        weights = weights / np.sum(weights)
        precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
        recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
        f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])

    else:
        raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")

    return precision_val, recall_val, f1_val

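A sketch of metrics; note that y_ts is expected one-hot (it is decoded internally) while test_preds are plain class indices:

import numpy as np

y_true_oh = np.eye(3)[[0, 1, 2, 2, 0]]   # one-hot ground truth
test_preds = np.array([0, 1, 2, 1, 0])   # decoded predictions

precision, recall, f1 = metrics(y_true_oh, test_preds, average='weighted')
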
def decode_one_hot(encoded_data):
    """
    Decodes one-hot encoded data to original categorical labels.

    Args:
        encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).

    Returns:
        numpy.ndarray: Decoded categorical labels with shape (n_samples,).
    """
    decoded_labels = np.argmax(encoded_data, axis=1)

    return decoded_labels

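decode_one_hot is the inverse mapping, via a row-wise argmax:

import numpy as np

encoded = np.array([[0, 1, 0], [1, 0, 0]])
labels = decode_one_hot(encoded)   # array([1, 0])
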
def roc_curve(y_true, y_score):
    """
    Compute Receiver Operating Characteristic (ROC) curve.

    Parameters:
        y_true : array, shape = [n_samples]
            True binary labels in range {0, 1} or {-1, 1}.
        y_score : array, shape = [n_samples]
            Target scores, can either be probability estimates of the positive class,
            confidence values, or non-thresholded measure of decisions (as returned
            by decision_function on some classifiers).

    Returns:
        fpr : array, shape = [n]
            Increasing false positive rates such that element i is the false positive rate
            of predictions with score >= thresholds[i].
        tpr : array, shape = [n]
            Increasing true positive rates such that element i is the true positive rate
            of predictions with score >= thresholds[i].
        thresholds : array, shape = [n]
            Decreasing thresholds on the decision function used to compute fpr and tpr.
    """
    y_true = np.asarray(y_true)
    y_score = np.asarray(y_score)

    if len(np.unique(y_true)) != 2:
        raise ValueError("Only binary classification is supported.")

    # Sort scores in descending order and reorder the labels accordingly.
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]

    fpr = []
    tpr = []
    thresholds = []
    n_pos = np.sum(y_true)
    n_neg = len(y_true) - n_pos

    tp = 0
    fp = 0
    prev_score = None

    # Sweep the decision threshold down through the sorted scores.
    for i, score in enumerate(y_score):
        if score != prev_score:
            fpr.append(fp / n_neg)
            tpr.append(tp / n_pos)
            thresholds.append(score)
            prev_score = score

        if y_true[i] == 1:
            tp += 1
        else:
            fp += 1

    fpr.append(fp / n_neg)
    tpr.append(tp / n_pos)
    thresholds.append(score)

    return np.array(fpr), np.array(tpr), np.array(thresholds)

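A binary sketch of roc_curve; the AUC can then be taken with the trapezoidal rule, as plot_evaluate does below:

import numpy as np

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, thresholds = roc_curve(y_true, y_score)
roc_auc = np.trapz(tpr, fpr)
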
def confusion_matrix(y_true, y_pred, class_count):
    """
    Computes confusion matrix.

    Args:
        y_true (numpy.ndarray): True class labels (1D array).
        y_pred (numpy.ndarray): Predicted class labels (1D array).
        class_count (int): Number of classes.

    Returns:
        numpy.ndarray: Confusion matrix of shape (class_count, class_count).
    """
    confusion = np.zeros((class_count, class_count), dtype=int)

    for i in range(len(y_true)):
        true_label = y_true[i]
        pred_label = y_pred[i]
        confusion[true_label, pred_label] += 1

    return confusion

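A sketch of confusion_matrix with integer class labels:

import numpy as np

y_true = np.array([0, 1, 2, 2, 1])
y_pred = np.array([0, 2, 2, 2, 1])

cm = confusion_matrix(y_true, y_pred, class_count=3)
# cm[i, j] counts samples of true class i predicted as class j.
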
def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):

    acc = acc_list[-1]
    y_true = decode_one_hot(y_test)

    y_true = np.array(y_true)
    y_preds = np.array(y_preds)
    Class = np.unique(decode_one_hot(y_test))

    precision, recall, f1 = metrics(y_test, y_preds)

    cm = confusion_matrix(y_true, y_preds, len(Class))
    fig, axs = plt.subplots(2, 2, figsize=(16, 12))

    sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
    axs[0, 0].set_title("Confusion Matrix")
    axs[0, 0].set_xlabel("Predicted Class")
    axs[0, 0].set_ylabel("Actual Class")

    if len(Class) == 2:
        fpr, tpr, thresholds = roc_curve(y_true, y_preds)

        roc_auc = np.trapz(tpr, fpr)
        axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
        axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
        axs[1, 0].set_xlim([0.0, 1.0])
        axs[1, 0].set_ylim([0.0, 1.05])
        axs[1, 0].set_xlabel('False Positive Rate')
        axs[1, 0].set_ylabel('True Positive Rate')
        axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
        axs[1, 0].legend(loc="lower right")
    else:
        # One-vs-rest ROC: binarize the labels for each class in turn.
        for i in range(len(Class)):

            y_true_copy = np.copy(y_true)
            y_preds_copy = np.copy(y_preds)

            y_true_copy[y_true_copy == i] = 0
            y_true_copy[y_true_copy != 0] = 1

            y_preds_copy[y_preds_copy == i] = 0
            y_preds_copy[y_preds_copy != 0] = 1

            fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)

            roc_auc = np.trapz(tpr, fpr)
            axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
            axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
            axs[1, 0].set_xlim([0.0, 1.0])
            axs[1, 0].set_ylim([0.0, 1.05])
            axs[1, 0].set_xlabel('False Positive Rate')
            axs[1, 0].set_ylabel('True Positive Rate')
            axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
            axs[1, 0].legend(loc="lower right")

    # Disabled per-class accuracy panel (would need a larger subplot grid):
    """
    accuracy_per_class = []

    for cls in Class:
        correct = np.sum((y_true == cls) & (y_preds == cls))
        total = np.sum(y_true == cls)
        accuracy_cls = correct / total if total > 0 else 0.0
        accuracy_per_class.append(accuracy_cls)

    axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
    axs[2, 0].set_xlabel('Class')
    axs[2, 0].set_ylabel('Accuracy')
    axs[2, 0].set_title('Class-wise Accuracy')
    axs[2, 0].set_xticks(Class)
    axs[2, 0].grid(True)
    """

    metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
    values = [precision, recall, f1, acc]
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']

    bars = axs[0, 1].bar(metric, values, color=colors)

    for bar, value in zip(bars, values):
        axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
                       ha='center', va='bottom', fontsize=12, color='white', weight='bold')

    axs[0, 1].set_ylim(0, 1)
    axs[0, 1].set_xlabel('Metrics')
    axs[0, 1].set_ylabel('Score')
    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)

    feature_indices = [0, 1]

    h = .02
    x_min, x_max = x_test[:, feature_indices[0]].min() - 1, x_test[:, feature_indices[0]].max() + 1
    y_min, y_max = x_test[:, feature_indices[1]].min() - 1, x_test[:, feature_indices[1]].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    grid = np.c_[xx.ravel(), yy.ravel()]

    try:

        grid_full = np.zeros((grid.shape[0], x_test.shape[1]))
        grid_full[:, feature_indices] = grid

        Z = [None] * len(grid_full)

        predict_progress = tqdm(total=len(grid_full), leave=False, desc="Predicts For Decision Boundary", ncols=120)

        for i in range(len(grid_full)):

            Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
            predict_progress.update(1)

        Z = np.array(Z)
        Z = Z.reshape(xx.shape)

        axs[1, 1].contourf(xx, yy, Z, alpha=0.8)
        axs[1, 1].scatter(x_test[:, feature_indices[0]], x_test[:, feature_indices[1]], c=decode_one_hot(y_test), edgecolors='k', marker='o', s=20, alpha=0.9)
        axs[1, 1].set_xlabel('Feature 1')
        axs[1, 1].set_ylabel('Feature 2')
        axs[1, 1].set_title('Decision Boundary')

    except:
        pass

    plt.show()

def plot_decision_boundary(ax, x, y, activation_potentiation, W, artist, draw_is_finished=False):
    feature_indices = [0, 1]

    h = .02
    x_min, x_max = x[:, feature_indices[0]].min() - 1, x[:, feature_indices[0]].max() + 1
    y_min, y_max = x[:, feature_indices[1]].min() - 1, x[:, feature_indices[1]].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    grid = np.c_[xx.ravel(), yy.ravel()]
    grid_full = np.zeros((grid.shape[0], x.shape[1]))
    grid_full[:, feature_indices] = grid

    Z = [None] * len(grid_full)

    for i in range(len(grid_full)):
        Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))

    Z = np.array(Z)
    Z = Z.reshape(xx.shape)

    if not draw_is_finished:

        art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
        art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
        ax[1, 0].set_xlabel('Feature 1')
        ax[1, 0].set_ylabel('Feature 2')
        ax[1, 0].set_title('Decision Boundary')
        artist.append([*art1_1.collections, art1_2])

    else:

        # Repeat the final frame 30 times so the animation pauses on the result.
        for i in range(30):

            art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
            art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
            ax[1, 0].set_xlabel('Feature 1')
            ax[1, 0].set_ylabel('Feature 2')
            ax[1, 0].set_title('Decision Boundary')
            artist.append([*art1_1.collections, art1_2])

    return artist

def pca(X, n_components):
    """
    Projects X onto its first n_components principal components.

    Parameters:
        X (numpy array): (n_samples, n_features)
        n_components (int): Number of principal components to keep.

    Returns:
        X_reduced (numpy array): (n_samples, n_components)
    """
    X_meaned = X - np.mean(X, axis=0)

    covariance_matrix = np.cov(X_meaned, rowvar=False)

    eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)

    # Sort eigenvectors by descending eigenvalue and keep the top n_components.
    sorted_index = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_index]

    eigenvectors_subset = sorted_eigenvectors[:, :n_components]

    X_reduced = np.dot(X_meaned, eigenvectors_subset)

    return X_reduced

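A sketch of pca, as plot_decision_space uses it below to project onto two components:

import numpy as np

X = np.random.rand(50, 10)
X_2d = pca(X, n_components=2)   # shape (50, 2)
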
def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):

    if x.shape[1] > 2:
        X_pca = pca(x, n_components=2)
    else:
        X_pca = x

    if y_preds is None:
        y_preds = decode_one_hot(y)

    y = decode_one_hot(y)
    num_classes = len(np.unique(y))

    cmap = plt.get_cmap(color)

    norm = plt.Normalize(vmin=0, vmax=num_classes - 1)

    plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, edgecolor='k', s=50, cmap=cmap, norm=norm)

    # Draw the convex hull of each predicted class to outline its region.
    for cls in range(num_classes):

        class_points = []

        for i in range(len(y)):
            if y_preds[i] == cls:
                class_points.append(X_pca[i])

        class_points = np.array(class_points)

        if len(class_points) > 2:
            hull = ConvexHull(class_points)
            hull_points = class_points[hull.vertices]

            hull_points = np.vstack([hull_points, hull_points[0]])

            plt.fill(hull_points[:, 0], hull_points[:, 1], color=cmap(norm(cls)), alpha=0.3, edgecolor='k', label=f'Class {cls} Hull')

    plt.title("Decision Space (Data Distribution)")

    plt.draw()

def manuel_balancer(x_train, y_train, target_samples_per_class):
    """
    Generates synthetic examples to balance classes to the specified number of examples per class.

    Arguments:
        x_train -- Input dataset (examples) - NumPy array format
        y_train -- Class labels (one-hot encoded) - NumPy array format
        target_samples_per_class -- Desired number of samples per class

    Returns:
        x_balanced -- Balanced input dataset (NumPy array format)
        y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
    """
    try:
        x_train = np.array(x_train)
        y_train = np.array(y_train)
    except:
        pass

    classes = np.arange(y_train.shape[1])
    class_count = len(classes)

    x_balanced = []
    y_balanced = []

    for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data', ncols=120):
        class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
        num_samples = len(class_indices)

        if num_samples > target_samples_per_class:

            # Undersample: keep a random subset of the majority class.
            selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
            x_balanced.append(x_train[selected_indices])
            y_balanced.append(y_train[selected_indices])

        else:

            x_balanced.append(x_train[class_indices])
            y_balanced.append(y_train[class_indices])

            if num_samples < target_samples_per_class:

                # Oversample: interpolate between random pairs of same-class samples.
                samples_to_add = target_samples_per_class - num_samples
                additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
                additional_labels = np.zeros((samples_to_add, y_train.shape[1]))

                for i in range(samples_to_add):

                    random_indices = np.random.choice(class_indices, 2, replace=False)
                    sample1 = x_train[random_indices[0]]
                    sample2 = x_train[random_indices[1]]

                    synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()

                    additional_samples[i] = synthetic_sample
                    additional_labels[i] = y_train[class_indices[0]]

                x_balanced.append(additional_samples)
                y_balanced.append(additional_labels)

    x_balanced = np.vstack(x_balanced)
    y_balanced = np.vstack(y_balanced)

    return x_balanced, y_balanced

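A sketch of manuel_balancer with deterministic class counts (10 per class, padded up to 20):

import numpy as np

x = np.random.rand(30, 4)
y = np.eye(3)[np.arange(30) % 3]   # one-hot labels, 10 samples per class

x_bal, y_bal = manuel_balancer(x, y, target_samples_per_class=20)
# Each class now has exactly 20 rows; the shortfall is filled with
# samples interpolated between random same-class pairs.
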
# Positional accessors for entries of the model container used elsewhere in this module.
def get_weights():
    return 0


def get_df():
    return 2


def get_preds():
    return 1


def get_acc():
    return 2