pyerualjetwork 2.1.2__py3-none-any.whl → 2.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan_di/plan_di.py CHANGED
@@ -3,9 +3,11 @@ Created on Thu Jun 12 00:00:00 2024

  @author: hasan can beydili
  """
+
+ import pandas as pd
  import numpy as np
  import time
- from colorama import Fore,Style
+ from colorama import Fore, Style
  from typing import List, Union
  import math
  from scipy.special import expit, softmax
@@ -13,11 +15,14 @@ import matplotlib.pyplot as plt
  import seaborn as sns

  # BUILD -----
+
+
  def fit(
  x_train: List[Union[int, float]],
- y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
+ # At least two.. and one hot encoded
+ y_train: List[Union[int, float, str]],
  ) -> str:
-
+
  infoPLAN = """
  Creates and configures a PLAN model.

@@ -32,28 +37,28 @@ def fit(
  """

  if len(x_train) != len(y_train):
- print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit",infoPLAN)
- return 'e'
-
+ print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
+ return 'e'
+
  class_count = set()
  for sublist in y_train:
-
+
  class_count.add(tuple(sublist))
-
-
+
  class_count = list(class_count)
-
+
  y_train = [tuple(sublist) for sublist in y_train]
-
- neurons = [len(class_count),len(class_count)]
- layers = ['fex','cat']
-
+
+ neurons = [len(class_count), len(class_count)]
+ layers = ['fex', 'cat']
+
  x_train[0] = np.array(x_train[0])
  x_train[0] = x_train[0].ravel()
  x_train_size = len(x_train[0])
-
- W = weight_identification(len(layers) - 1,len(class_count),neurons,x_train_size)
- Divides, Piece = synaptic_dividing(len(class_count),W)
+
+ W = weight_identification(
+ len(layers) - 1, len(class_count), neurons, x_train_size)
+ Divides, Piece = synaptic_dividing(len(class_count), W)
  trained_W = [1] * len(W)
  print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
  start_time = time.time()
@@ -61,14 +66,14 @@ def fit(
  uni_start_time = time.time()
  inp = np.array(inp)
  inp = inp.ravel()
-
+
  if x_train_size != len(inp):
- print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",infoPLAN + Style.RESET_ALL)
+ print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
+ infoPLAN + Style.RESET_ALL)
  return 'e'
-
-
+
  for Ulindex, Ul in enumerate(class_count):
-
+
  if Ul == y_train[index]:
  for Windex, w in enumerate(W):
  for i, ul in enumerate(Ul):
@@ -77,64 +82,63 @@ def fit(

  cs = Divides[int(k)][Windex][0]

-
- W[Windex] = synaptic_pruning(w, cs, 'row', int(k), len(class_count), Piece[Windex], True)
+ W[Windex] = synaptic_pruning(w, cs, 'row', int(
+ k), len(class_count), Piece[Windex], True)

  neural_layer = inp
-
+
  for Lindex, Layer in enumerate(layers):
-
-
+
  neural_layer = normalization(neural_layer)
-
+
  y = np.argmax(y_train[index])
  if Layer == 'fex':
  W[Lindex] = fex(neural_layer, W[Lindex], True, y)
  elif Layer == 'cat':
  W[Lindex] = cat(neural_layer, W[Lindex], True, y)
-
-
+
  for i, w in enumerate(W):
  trained_W[i] = trained_W[i] + w
-
- W = weight_identification(len(layers) - 1, len(class_count), neurons, x_train_size)
-
-
+
+ W = weight_identification(
+ len(layers) - 1, len(class_count), neurons, x_train_size)
+
  uni_end_time = time.time()
-
- calculating_est = round((uni_end_time - uni_start_time) * (len(x_train) - index),3)
-
+
+ calculating_est = round(
+ (uni_end_time - uni_start_time) * (len(x_train) - index), 3)
+
  if calculating_est < 60:
- print('\rest......(sec):',calculating_est,'\n',end= "")
-
+ print('\rest......(sec):', calculating_est, '\n', end="")
+
  elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):',calculating_est/60,'\n',end= "")
-
+ print('\rest......(min):', calculating_est/60, '\n', end="")
+
  elif calculating_est > 3600:
- print('\rest......(h):',calculating_est/3600,'\n',end= "")
-
- print('\rTraining: ' , index, "/", len(x_train),"\n", end="")
-
+ print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+ print('\rTraining: ', index, "/", len(x_train), "\n", end="")
+
  EndTime = time.time()

- calculating_est = round(EndTime - start_time,2)
+ calculating_est = round(EndTime - start_time, 2)

  print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n" + Style.RESET_ALL)

  if calculating_est < 60:
- print('Total training time(sec): ',calculating_est)
-
+ print('Total training time(sec): ', calculating_est)
+
  elif calculating_est > 60 and calculating_est < 3600:
- print('Total training time(min): ',calculating_est/60)
-
+ print('Total training time(min): ', calculating_est/60)
+
  elif calculating_est > 3600:
- print('Total training time(h): ',calculating_est/3600)
-
+ print('Total training time(h): ', calculating_est/3600)

  return trained_W
-
+
  # FUNCTIONS -----

+
  def weight_identification(
  layer_count, # int: Number of layers in the neural network.
  class_count, # int: Number of classes in the classification task.
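In 2.1.4, `fit` therefore needs only `x_train` and one-hot `y_train`, and returns the trained weight list. A minimal usage sketch under assumptions (the import path is inferred from the file location `plan_di/plan_di.py`, and the toy arrays are made up for illustration):

    import numpy as np
    from plan_di import plan_di as plan

    # Two classes, one-hot labels; each sample is a flat feature vector.
    x_train = [np.array([0.1, 0.9, 0.3, 0.7]), np.array([0.8, 0.2, 0.6, 0.4])]
    y_train = [[1, 0], [0, 1]]

    trained_W = plan.fit(x_train, y_train)  # list of trained weight matrices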
@@ -154,25 +158,26 @@ def weight_identification(
  list([numpy_arrays],[...]): pretrained weight matices of the model. .
  """

-
  Wlen = layer_count + 1
  W = [None] * Wlen
- W[0] = np.ones((neurons[0],x_train_size))
+ W[0] = np.ones((neurons[0], x_train_size))
  ws = layer_count - 1
  for w in range(ws):
- W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
- W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
+ W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
+ W[layer_count] = np.ones((class_count, neurons[layer_count - 1]))
  return W

+
  def synaptic_pruning(
  w, # num: Weight matrix of the neural network.
  cs, # int: cs = cut_start, Synaptic connections between neurons.
  key, # int: key for identifying synaptic connections.
  Class, # int: Class label for the current training instance.
- class_count, # int: Total number of classes in the dataset.
- piece, # int: Which set of neurons will information be transferred to?
- is_training # bool: Flag indicating if the function is called during training (True or False).
-
+ class_count, # int: Total number of classes in the dataset.
+ piece, # int: Which set of neurons will information be transferred to?
+ # bool: Flag indicating if the function is called during training (True or False).
+ is_training
+
  ) -> str:
  infoPruning = """
  Performs synaptic pruning in a neural network model.
@@ -189,48 +194,45 @@ def synaptic_pruning(
  Returns:
  numpy array: Weight matrix.
  """
-
-
- Class += 1 # because index start 0
-
- if Class != 1:
-
-
-
- ce = cs / Class # ce(cut_end) = cs(cut_start) / current_class
-
+
+ Class += 1 # because index start 0
+
+ if Class != 1:
+
+ ce = cs / Class # ce(cut_end) = cs(cut_start) / current_class
+
  if is_training == True:
-
+
  p = piece
-
+
  for i in range(Class - 3):
-
- piece+=p
-
- if Class!= 2:
- ce += piece
-
- w[int(ce)-1::-1,:] = 0
-
-
- w[cs:,:] = 0
+
+ piece += p
+
+ if Class != 2:
+ ce += piece
+
+ w[int(ce)-1::-1, :] = 0
+
+ w[cs:, :] = 0

  else:
-
- if key == 'row':
-
- w[cs:,:] = 0
-
- elif key == 'col':
-
- w[:,cs] = 0
-
- else:
- print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning)
- return 'e'
-
+
+ if key == 'row':
+
+ w[cs:, :] = 0
+
+ elif key == 'col':
+
+ w[:, cs] = 0
+
+ else:
+ print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning)
+ return 'e'
+
  return w

+
  def synaptic_dividing(
  class_count, # int: Total number of classes in the dataset.
  W # list[num]: Weight matrix of the neural network.
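For orientation while reading `synaptic_pruning` above: outside training, `key='row'` simply zeroes every weight row from the cut-start index onward. A standalone sketch of that one operation on a toy matrix (not a call into the package):

    import numpy as np

    w = np.ones((4, 3))
    cs = 2          # cut_start: first row to prune
    w[cs:, :] = 0   # rows 2 and 3 are zeroed; rows 0 and 1 survive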
@@ -246,35 +248,33 @@ def synaptic_dividing(
  list: a 3D list holds informations of divided net and list of neuron groups separated by classes.
  """

-
  Piece = [1] * len(W)
-
+
  Divides = [[[0] for _ in range(len(W))] for _ in range(class_count)]
-
-
+
  for i in range(len(W)):
-

- Piece[i] = int(math.floor(W[i].shape[0] / class_count))
+ Piece[i] = int(math.floor(W[i].shape[0] / class_count))

- cs = 0
+ cs = 0

  for i in range(len(W)):
  for j in range(class_count):
  cs = cs + Piece[i]
  Divides[j][i][0] = cs
-
+
  j = 0
  cs = 0
-
+
  return Divides, Piece
-
+

  def fex(
  Input, # list[num]: Input data.
  w, # num: Weight matrix of the neural network.
- is_training, # bool: Flag indicating if the function is called during training (True or False).
- Class # int: Which class is, if training.
+ # bool: Flag indicating if the function is called during training (True or False).
+ is_training,
+ Class # int: Which class is, if training.
  ) -> tuple:
  """
  Applies feature extraction process to the input data using synaptic pruning.
@@ -288,23 +288,25 @@ def fex(
  Returns:
  tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
  """
-
+
  if is_training == True:
-
- w[Class,:] = Input
+
+ w[Class, :] = Input

  return w
-
+
  else:
-
+
  neural_layer = np.dot(w, Input)
-
+
  return neural_layer

+
  def cat(
  Input, # list[num]: Input data.
  w, # list[num]: Weight matrix of the neural network.
- is_training, # (bool): Flag indicating if the function is called during training (True or False).
+ # (bool): Flag indicating if the function is called during training (True or False).
+ is_training,
  Class
  ) -> tuple:
  """
@@ -318,19 +320,17 @@ def cat(
  Returns:
  tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
  """
-
-
-
+
  if is_training == True:
-
- w[Class,Class] += 1
-
+
+ w[Class, Class] += 1
+
  return w
-
+
  else:
-
+
  neural_layer = np.dot(w, Input)
-
+
  return neural_layer


@@ -347,13 +347,12 @@ def normalization(
  (num) Scaled input data after normalization.
  """

-
  AbsVector = np.abs(Input)
-
+
  MaxAbs = np.max(AbsVector)
-
+
  ScaledInput = Input / MaxAbs
-
+
  return ScaledInput


@@ -369,7 +368,7 @@ def Softmax(
  Returns:
  (num): Transformed data after applying softmax function.
  """
-
+
  return softmax(x)


@@ -401,196 +400,181 @@ def Relu(
  (num): Transformed data after applying ReLU function.
  """

-
  return np.maximum(0, x)


-
-
  def evaluate(
  x_test, # list[num]: Test input data.
  y_test, # list[num]: Test labels.
- visualize, # str: visualize Testing procces or not visualize ('y' or 'n')
+ show_metrices, # show_metrices (bool): (True or False)
  W # list[num]: Weight matrix list of the neural network.
  ) -> tuple:
- infoTestModel = """
+ infoTestModel = """
  Tests the neural network model with the given test data.

  Args:
  x_test (list[num]): Test input data.
  y_test (list[num]): Test labels.
- activation_potential (float): Input activation potential
- visualize (str): Visualize test progress ? ('y' or 'n')
+ show_metrices (bool): (True or False)
  W (list[num]): Weight matrix list of the neural network.

  Returns:
  tuple: A tuple containing the predicted labels and the accuracy of the model.
  """
-
- layers = ['fex','cat']

+ layers = ['fex', 'cat']

- try:
- Wc = [0] * len(W) # Wc = Weight copy
- true = 0
- TestPredictions = [None] * len(y_test)
- for i, w in enumerate(W):
- Wc[i] = np.copy(w)
- print('\rCopying weights.....',i+1,'/',len(W),end = "")
-
- print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
- start_time = time.time()
- for inpIndex,Input in enumerate(x_test):
- Input = np.array(Input)
- Input = Input.ravel()
- uni_start_time = time.time()
- neural_layer = Input
+ try:
+ Wc = [0] * len(W) # Wc = Weight copy
+ true = 0
+ y_preds = [-1] * len(y_test)
+ acc_list = []

- for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
- if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None)
- elif Layer == 'cat':
- neural_layer = cat(neural_layer, W[index], False, None)
-
+ for i, w in enumerate(W):
+ Wc[i] = np.copy(w)
+ print('\rCopying weights.....', i+1, '/', len(W), end="")
+
+ print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+ start_time = time.time()
+ for inpIndex, Input in enumerate(x_test):
+ Input = np.array(Input)
+ Input = Input.ravel()
+ uni_start_time = time.time()
+ neural_layer = Input
+
+ for index, Layer in enumerate(layers):
+
+ neural_layer = normalization(neural_layer)
+
+ if Layer == 'fex':
+ neural_layer = fex(neural_layer, W[index], False, None)
+ elif Layer == 'cat':
+ neural_layer = cat(neural_layer, W[index], False, None)
+
+ for i, w in enumerate(Wc):
+ W[i] = np.copy(w)
+ RealOutput = np.argmax(y_test[inpIndex])
+ PredictedOutput = np.argmax(neural_layer)
+ if RealOutput == PredictedOutput:
+ true += 1
+ acc = true / len(y_test)
+ if show_metrices == True:
+ acc_list.append(acc)
+ y_preds[inpIndex] = PredictedOutput
+
+ uni_end_time = time.time()
+
+ calculating_est = round(
+ (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
+
+ if calculating_est < 60:
+ print('\rest......(sec):', calculating_est, '\n', end="")
+ print('\rTest accuracy: ', acc, "\n", end="")
+
+ elif calculating_est > 60 and calculating_est < 3600:
+ print('\rest......(min):', calculating_est/60, '\n', end="")
+ print('\rTest accuracy: ', acc, "\n", end="")
+
+ elif calculating_est > 3600:
+ print('\rest......(h):', calculating_est/3600, '\n', end="")
+ print('\rTest accuracy: ', acc, "\n", end="")
+ if show_metrices == True:
+ plot_evaluate(y_test, y_preds, acc_list)
+
+ EndTime = time.time()
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
- RealOutput = np.argmax(y_test[inpIndex])
- PredictedOutput = np.argmax(neural_layer)
- if RealOutput == PredictedOutput:
- true += 1
- acc = true / len(y_test)
- TestPredictions[inpIndex] = PredictedOutput
-
- if visualize == 'y':
-
- y_testVisual = np.copy(y_test)
- y_testVisual = np.argmax(y_testVisual, axis=1)
-
- plt.figure(figsize=(12, 6))
- sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
- sns.kdeplot(TestPredictions, label='Predictions', fill=True)
- plt.legend()
- plt.xlabel('Class')
- plt.ylabel('Data size')
- plt.title('Predictions and Real Outputs for Testing KDE Plot')
- plt.show()
-
- if inpIndex + 1 != len(x_test):
-
- plt.close('all')
-
- uni_end_time = time.time()
-
- calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
-
+
+ calculating_est = round(EndTime - start_time, 2)
+
+ print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
  if calculating_est < 60:
- print('\rest......(sec):',calculating_est,'\n',end= "")
- print('\rTest accuracy: ' ,acc ,"\n", end="")
-
+ print('Total testing time(sec): ', calculating_est)
+
  elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):',calculating_est/60,'\n',end= "")
- print('\rTest accuracy: ' ,acc ,"\n", end="")
-
- elif calculating_est > 3600:
- print('\rest......(h):',calculating_est/3600,'\n',end= "")
- print('\rTest accuracy: ' ,acc ,"\n", end="")
-
- EndTime = time.time()
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
-
- calculating_est = round(EndTime - start_time,2)
+ print('Total testing time(min): ', calculating_est/60)

- print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+ elif calculating_est > 3600:
+ print('Total testing time(h): ', calculating_est/3600)

- if calculating_est < 60:
- print('Total testing time(sec): ',calculating_est)
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('Total testing time(min): ',calculating_est/60)
-
- elif calculating_est > 3600:
- print('Total testing time(h): ',calculating_est/3600)
-
- if acc >= 0.8:
- print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+ if acc >= 0.8:
+ print(Fore.GREEN + '\nTotal Test accuracy: ',
+ acc, '\n' + Style.RESET_ALL)

- elif acc < 0.8 and acc > 0.6:
- print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+ elif acc < 0.8 and acc > 0.6:
+ print(Fore.MAGENTA + '\nTotal Test accuracy: ',
+ acc, '\n' + Style.RESET_ALL)

- elif acc <= 0.6:
- print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+ elif acc <= 0.6:
+ print(Fore.RED + '\nTotal Test accuracy: ', acc, '\n' + Style.RESET_ALL)

-
-
- except:
-
- print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+ except:
+
+ print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
+ infoTestModel + Style.RESET_ALL)
  return 'e'
-

-
- return W,TestPredictions,acc
+ return W, y_preds, acc
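`evaluate` swaps the old `visualize='y'/'n'` flag for a boolean `show_metrices`; when it is True, per-sample accuracies are collected in `acc_list` and handed to the new `plot_evaluate` helper at the end of the run. A hedged sketch of the new call, with hypothetical `x_test`/`y_test` prepared the same way as in the `fit` example above:

    W, y_preds, acc = plan.evaluate(x_test, y_test, True, trained_W)
    print('final accuracy:', acc)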
+

  def multiple_evaluate(
  x_test, # list[num]: Test input data.
  y_test, # list[num]: Test labels.
- visualize, # str: visualize Testing procces or not visualize ('y' or 'n')
+ show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
  MW # list[list[num]]: Weight matrix of the neural network.
  ) -> tuple:
- infoTestModel = """
+ infoTestModel = """
  Tests the neural network model with the given test data.

  Args:
  x_test (list[num]): Test input data.
  y_test (list[num]): Test labels.
- activation_potential (float): Input activation potential
- visualize (str): Visualize test progress ? ('y' or 'n')
+ show_metrices (bool): (True or False)
  MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)

  Returns:
  tuple: A tuple containing the predicted labels and the accuracy of the model.
  """
-
- layers = ['fex','cat']
-
- try:
+
+ layers = ['fex', 'cat']
+
+ try:
+ y_preds = [-1] * len(y_test)
+ acc_list = []
  print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
  start_time = time.time()
  true = 0
- for inpIndex,Input in enumerate(x_test):
-
+ for inpIndex, Input in enumerate(x_test):
+
  output_layer = 0
-
+
  for m, Model in enumerate(MW):
-
+
  W = Model
-
- Wc = [0] * len(W) # Wc = weight copy
-
- TestPredictions = [None] * len(y_test)
+
+ Wc = [0] * len(W) # Wc = weight copy
+
+ y_preds = [None] * len(y_test)
  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
-
+
  Input = np.array(Input)
  Input = Input.ravel()
  uni_start_time = time.time()
  neural_layer = Input
-
+
  for index, Layer in enumerate(layers):
-
+
  neural_layer = normalization(neural_layer)
-
+
  if Layer == 'fex':
  neural_layer = fex(neural_layer, W[index], False, None)
  elif Layer == 'cat':
  neural_layer = cat(neural_layer, W[index], False, None)
-
+
  output_layer += neural_layer
-
+
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
  for i, w in enumerate(Wc):
@@ -600,89 +584,78 @@ def multiple_evaluate(
  if RealOutput == PredictedOutput:
  true += 1
  acc = true / len(y_test)
- TestPredictions[inpIndex] = PredictedOutput
-
- if visualize == 'y':
-
- y_testVisual = np.copy(y_test)
- y_testVisual = np.argmax(y_testVisual, axis=1)
-
- plt.figure(figsize=(12, 6))
- sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
- sns.kdeplot(TestPredictions, label='Predictions', fill=True)
- plt.legend()
- plt.xlabel('Class')
- plt.ylabel('Data size')
- plt.title('Predictions and Real Outputs for Testing KDE Plot')
- plt.show()
-
- if inpIndex + 1 != len(x_test):
-
- plt.close('all')
+ if show_metrices == True:
+ acc_list.append(acc)
+ y_preds[inpIndex] = PredictedOutput

+
  uni_end_time = time.time()
-
- calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
-
+
+ calculating_est = round(
+ (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
+
  if calculating_est < 60:
- print('\rest......(sec):',calculating_est,'\n',end= "")
- print('\rTest accuracy: ' ,acc ,"\n", end="")
-
+ print('\rest......(sec):', calculating_est, '\n', end="")
+ print('\rTest accuracy: ', acc, "\n", end="")
+
  elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):',calculating_est/60,'\n',end= "")
- print('\rTest accuracy: ' ,acc ,"\n", end="")
-
+ print('\rest......(min):', calculating_est/60, '\n', end="")
+ print('\rTest accuracy: ', acc, "\n", end="")
+
  elif calculating_est > 3600:
- print('\rest......(h):',calculating_est/3600,'\n',end= "")
- print('\rTest accuracy: ' ,acc ,"\n", end="")
-
+ print('\rest......(h):', calculating_est/3600, '\n', end="")
+ print('\rTest accuracy: ', acc, "\n", end="")
+ if show_metrices == True:
+ plot_evaluate(y_test, y_preds, acc_list)
+
  EndTime = time.time()
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
-
- calculating_est = round(EndTime - start_time,2)
-
+
+ calculating_est = round(EndTime - start_time, 2)
+
  print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
-
+
  if calculating_est < 60:
- print('Total testing time(sec): ',calculating_est)
-
+ print('Total testing time(sec): ', calculating_est)
+
  elif calculating_est > 60 and calculating_est < 3600:
- print('Total testing time(min): ',calculating_est/60)
-
+ print('Total testing time(min): ', calculating_est/60)
+
  elif calculating_est > 3600:
- print('Total testing time(h): ',calculating_est/3600)
-
+ print('Total testing time(h): ', calculating_est/3600)
+
  if acc >= 0.8:
- print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
-
+ print(Fore.GREEN + '\nTotal Test accuracy: ',
+ acc, '\n' + Style.RESET_ALL)
+
  elif acc < 0.8 and acc > 0.6:
- print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
-
+ print(Fore.MAGENTA + '\nTotal Test accuracy: ',
+ acc, '\n' + Style.RESET_ALL)
+
  elif acc <= 0.6:
- print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
-
-
+ print(Fore.RED + '\nTotal Test accuracy: ',
+ acc, '\n' + Style.RESET_ALL)

  except:
-
- print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
- return 'e'
-

-
- return W,TestPredictions,acc
+ print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+ return 'e'
+
+ return W, y_preds, acc
+

  def save_model(model_name,
- model_type,
- class_count,
- test_acc,
- weights_type,
- weights_format,
- model_path,
- W
- ):
-
+ model_type,
+ class_count,
+ test_acc,
+ weights_type,
+ weights_format,
+ model_path,
+ scaler,
+ W
+ ):
+
  infosave_model = """
  Function to save a pruning learning model.

@@ -695,40 +668,44 @@ def save_model(model_name,
  weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
  WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
  model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+ scaler (bool): trained data it used standard_scaler ? (True or False)
  W: Weights of the model.

  Returns:
  str: Message indicating if the model was saved successfully or encountered an error.
  """
-
+
  # Operations to be performed by the function will be written here
  pass

- layers = ['fex','cat']
+ layers = ['fex', 'cat']

- if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
- print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" + infosave_model + Style.RESET_ALL)
+ if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
+ print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
+ infosave_model + Style.RESET_ALL)
  return 'e'
-
- if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
- print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + infosave_model + Style.RESET_ALL)
+
+ if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
+ print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
+ infosave_model + Style.RESET_ALL)
  return 'e'
-
+
  NeuronCount = 0
  SynapseCount = 0
-
+
  try:
  for w in W:
  NeuronCount += np.shape(w)[0]
  SynapseCount += np.shape(w)[0] * np.shape(w)[1]
  except:
-
- print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + infosave_model + Style.RESET_ALL)
+
+ print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
+ infosave_model + Style.RESET_ALL)
  return 'e'
  import pandas as pd
  from datetime import datetime
  from scipy import io
-
+
  data = {'MODEL NAME': model_name,
  'MODEL TYPE': model_type,
  'LAYERS': layers,
@@ -740,96 +717,96 @@ def save_model(model_name,
  'SAVE DATE': datetime.now(),
  'WEIGHTS TYPE': weights_type,
  'WEIGHTS FORMAT': weights_format,
- 'MODEL PATH': model_path
+ 'MODEL PATH': model_path,
+ 'STANDARD SCALER': scaler
  }
  try:
-
+
  df = pd.DataFrame(data)

-
  df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
-

  except:
-
- print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" + infosave_model + Style.RESET_ALL)
+
+ print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
+ infosave_model + Style.RESET_ALL)
  return 'e'
  try:
-
+
  if weights_type == 'txt' and weights_format == 'd':
-
+
  for i, w in enumerate(W):
- np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%d')
-
+ np.savetxt(model_path + model_name +
+ str(i+1) + 'w.txt', w, fmt='%d')
+
  if weights_type == 'txt' and weights_format == 'f':
-
+
  for i, w in enumerate(W):
- np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%f')
-
+ np.savetxt(model_path + model_name +
+ str(i+1) + 'w.txt', w, fmt='%f')
+
  if weights_type == 'txt' and weights_format == 'raw':
-
+
  for i, w in enumerate(W):
- np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w)
-
-
+ np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
+
  ###
-
-
+
  if weights_type == 'npy' and weights_format == 'd':
-
+
  for i, w in enumerate(W):
- np.save(model_path + model_name + str(i+1) + 'w.npy', w.astype(int))
-
+ np.save(model_path + model_name +
+ str(i+1) + 'w.npy', w.astype(int))
+
  if weights_type == 'npy' and weights_format == 'f':
-
+
  for i, w in enumerate(W):
- np.save(model_path + model_name + str(i+1) + 'w.npy' , w, w.astype(float))
-
+ np.save(model_path + model_name + str(i+1) +
+ 'w.npy', w, w.astype(float))
+
  if weights_type == 'npy' and weights_format == 'raw':
-
+
  for i, w in enumerate(W):
- np.save(model_path + model_name + str(i+1) + 'w.npy' , w)
-
-
+ np.save(model_path + model_name + str(i+1) + 'w.npy', w)
+
  ###
-
-
+
  if weights_type == 'mat' and weights_format == 'd':
-
+
  for i, w in enumerate(W):
  w = {'w': w.astype(int)}
  io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
+
  if weights_type == 'mat' and weights_format == 'f':
-
+
  for i, w in enumerate(W):
  w = {'w': w.astype(float)}
  io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
+
  if weights_type == 'mat' and weights_format == 'raw':
-
+
  for i, w in enumerate(W):
  w = {'w': w}
  io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
+
  except:
-
+
  print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
  return 'e'
  print(df)
  message = (
  Fore.GREEN + "Model Saved Successfully\n" +
- Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
+ Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
  Style.RESET_ALL
- )
-
+ )
+
  return print(message)


  def load_model(model_name,
- model_path,
- ):
- infoload_model = """
+ model_path,
+ ):
+ infoload_model = """
  Function to load a pruning learning model.

  Arguments:
@@ -839,50 +816,43 @@ def load_model(model_name,
  Returns:
  lists: W(list[num]), activation_potential, DataFrame of the model
  """
- pass
+ pass

-
- import pandas as pd
- import scipy.io as sio
-
- try:
+ import scipy.io as sio
+
+ try:
+
+ df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
+
+ except:
+
+ print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
+ infoload_model + Style.RESET_ALL)
+
+ model_name = str(df['MODEL NAME'].iloc[0])
+ layer_count = int(df['LAYER COUNT'].iloc[0])
+ WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+
+ W = [0] * layer_count
+
+ if WeightType == 'txt':
+ for i in range(layer_count):
+ W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
+ elif WeightType == 'npy':
+ for i in range(layer_count):
+ W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
+ elif WeightType == 'mat':
+ for i in range(layer_count):
+ W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
+ else:
+ raise ValueError(
+ Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
+ print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
+ return W, df
+
+
+ def predict_model_ssd(Input, model_name, model_path):

- df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
-
- except:
-
- print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + infoload_model + Style.RESET_ALL)
-
- model_name = str(df['MODEL NAME'].iloc[0])
- layers = df['LAYERS'].tolist()
- layer_count = int(df['LAYER COUNT'].iloc[0])
- class_count = int(df['CLASS COUNT'].iloc[0])
- NeuronCount = int(df['NEURON COUNT'].iloc[0])
- SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
- test_acc = int(df['TEST ACCURACY'].iloc[0])
- model_type = str(df['MODEL TYPE'].iloc[0])
- WeightType = str(df['WEIGHTS TYPE'].iloc[0])
- WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
- model_path = str(df['MODEL PATH'].iloc[0])
-
- W = [0] * layer_count
-
- if WeightType == 'txt':
- for i in range(layer_count):
- W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
- elif WeightType == 'npy':
- for i in range(layer_count):
- W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
- elif WeightType == 'mat':
- for i in range(layer_count):
- W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
- else:
- raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
- print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
- return W,df
-
- def predict_model_ssd(Input,model_name,model_path):
-
  infopredict_model_ssd = """
  Function to make a prediction using a divided pruning learning artificial neural network (PLAN).

@@ -893,10 +863,16 @@ def predict_model_ssd(Input,model_name,model_path):
  Returns:
  ndarray: Output from the model.
  """
- W = load_model(model_name,model_path)[0]
-
- layers = ['fex','cat']
-
+ W, df = load_model(model_name, model_path)
+
+ scaler = str(df['STANDARD SCALER'].iloc[0])
+
+ if scaler == 'True':
+
+ Input = standard_scaler(Input, None)
+
+ layers = ['fex', 'cat']
+
  Wc = [0] * len(W)
  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
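`predict_model_ssd` now unpacks `(W, df)` from `load_model` and re-applies `standard_scaler` whenever the saved 'STANDARD SCALER' column reads 'True'. A hedged call sketch (model name, path and `sample` are hypothetical):

    output = plan.predict_model_ssd(sample, 'demo_plan', 'C:/models/')
    predicted_class = np.argmax(output)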
@@ -904,39 +880,44 @@ def predict_model_ssd(Input,model_name,model_path):
  neural_layer = Input
  neural_layer = np.array(neural_layer)
  neural_layer = neural_layer.ravel()
- for index, Layer in enumerate(layers):
+ for index, Layer in enumerate(layers):

  neural_layer = normalization(neural_layer)
-
+
  if Layer == 'fex':
  neural_layer = fex(neural_layer, W[index], False, None)
  elif Layer == 'cat':
  neural_layer = cat(neural_layer, W[index], False, None)
  except:
- print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
- return 'e'
+ print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
+ infopredict_model_ssd + Style.RESET_ALL)
+ return 'e'
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
  return neural_layer


- def predict_model_ram(Input,W):
-
+ def predict_model_ram(Input, scaler, W):
+
  infopredict_model_ram = """
  Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
  from weights and parameters stored in memory.

  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
- activation_potential (float): Activation potential.
+ scaler (bool): trained data it used standard_scaler ? (True or False)
  W (list of ndarrays): Weights of the model.

  Returns:
  ndarray: Output from the model.
  """
-
- layers = ['fex','cat']
-
+
+ if scaler == True:
+
+ Input = standard_scaler(Input, None)
+
+ layers = ['fex', 'cat']
+
  Wc = [0] * len(W)
  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
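`predict_model_ram` gains the same `scaler` flag for weights held in memory; pass True only if the training data went through `standard_scaler`. Sketch, again with hypothetical objects:

    output = plan.predict_model_ram(sample, True, trained_W)  # (Input, scaler, W)
    predicted_class = np.argmax(output)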
@@ -944,147 +925,452 @@ def predict_model_ram(Input,W):
  neural_layer = Input
  neural_layer = np.array(neural_layer)
  neural_layer = neural_layer.ravel()
- for index, Layer in enumerate(layers):
+ for index, Layer in enumerate(layers):

  neural_layer = normalization(neural_layer)
-
+
  if Layer == 'fex':
  neural_layer = fex(neural_layer, W[index], False, None)
  elif Layer == 'cat':
  neural_layer = cat(neural_layer, W[index], False, None)
-
+
  except:
- print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
+ print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
+ infopredict_model_ram + Style.RESET_ALL)
  return 'e'
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
  return neural_layer
-

- def auto_balancer(x_train, y_train, class_count):
-
- infoauto_balancer = """
+
+ def auto_balancer(x_train, y_train):
+
+ infoauto_balancer = """
  Function to balance the training data across different classes.

  Arguments:
  x_train (list): Input data for training.
  y_train (list): Labels corresponding to the input data.
- class_count (int): Number of classes.

  Returns:
  tuple: A tuple containing balanced input data and labels.
  """
- try:
- ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[0] for i in range(class_count)}
+ classes = np.arange(y_train.shape[1])
+ class_count = len(classes)
+
+ try:
+ ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
+ 0] for i in range(class_count)}
  classes = [len(ClassIndices[i]) for i in range(class_count)]
-
+
  if len(set(classes)) == 1:
- print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+ print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
  return x_train, y_train
-
+
  MinCount = min(classes)
-
+
  BalancedIndices = []
  for i in range(class_count):
  if len(ClassIndices[i]) > MinCount:
- SelectedIndices = np.random.choice(ClassIndices[i], MinCount, replace=False)
+ SelectedIndices = np.random.choice(
+ ClassIndices[i], MinCount, replace=False)
  else:
  SelectedIndices = ClassIndices[i]
  BalancedIndices.extend(SelectedIndices)
-
+
  BalancedInputs = [x_train[idx] for idx in BalancedIndices]
  BalancedLabels = [y_train[idx] for idx in BalancedIndices]
-
- print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
- except:
+
+ print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
+ ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+ except:
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
  return 'e'
-
- return BalancedInputs, BalancedLabels
-
- def synthetic_augmentation(x, y, class_count):
+
+ return BalancedInputs, BalancedLabels
+
+
+ def synthetic_augmentation(x_train, y_train):
  """
  Generates synthetic examples to balance classes with fewer examples.
-
+
  Arguments:
  x -- Input dataset (examples) - list format
  y -- Class labels (one-hot encoded) - list format
- class_count -- Number of classes
-
+
  Returns:
  x_balanced -- Balanced input dataset (list format)
  y_balanced -- Balanced class labels (one-hot encoded, list format)
  """
+ x = x_train
+ y = y_train
+ classes = np.arange(y_train.shape[1])
+ class_count = len(classes)
+
  # Calculate class distribution
  class_distribution = {i: 0 for i in range(class_count)}
  for label in y:
  class_distribution[np.argmax(label)] += 1
-
+
  max_class_count = max(class_distribution.values())
-
+
  x_balanced = list(x)
  y_balanced = list(y)
-
+
  for class_label in range(class_count):
- class_indices = [i for i, label in enumerate(y) if np.argmax(label) == class_label]
+ class_indices = [i for i, label in enumerate(
+ y) if np.argmax(label) == class_label]
  num_samples = len(class_indices)
-
+
  if num_samples < max_class_count:
  while num_samples < max_class_count:

- random_indices = np.random.choice(class_indices, 2, replace=False)
+ random_indices = np.random.choice(
+ class_indices, 2, replace=False)
  sample1 = x[random_indices[0]]
  sample2 = x[random_indices[1]]
-
- synthetic_sample = sample1 + (np.array(sample2) - np.array(sample1)) * np.random.rand()
-
+
+ synthetic_sample = sample1 + \
+ (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
  x_balanced.append(synthetic_sample.tolist())
  y_balanced.append(y[class_indices[0]])
-
+
  num_samples += 1
-
+
  return np.array(x_balanced), np.array(y_balanced)

+
  def standard_scaler(x_train, x_test):
- info_standard_scaler = """
- Standardizes training and test datasets.
+ info_standard_scaler = """
+ Standardizes training and test datasets. x_test may be None.

  Args:
  train_data: numpy.ndarray
- Training data (n_samples, n_features)
+ Training data
  test_data: numpy.ndarray
- Test data (n_samples, n_features)
+ Test data

  Returns:
  tuple
  Standardized training and test datasets
  """
- try:
- mean = np.mean(x_train, axis=0)
- std = np.std(x_train, axis=0)
-
-
- train_data_scaled = (x_train - mean) / std
- test_data_scaled = (x_test - mean) / std
+ try:

- except:
- print(Fore.RED + "ERROR: x_train and x_test must be numpy array from standard_scaler" + info_standard_scaler)
-
- return train_data_scaled, test_data_scaled
+ mean = np.mean(x_train, axis=0)
+ std = np.std(x_train, axis=0)
+
+ if x_test == None:
+
+ train_data_scaled = (x_train - mean) / std
+ return train_data_scaled
+
+ else:
+ train_data_scaled = (x_train - mean) / std
+ test_data_scaled = (x_test - mean) / std
+ return train_data_scaled, test_data_scaled
+
+ except:
+ print(
+ Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+
+
+ def encode_one_hot(y_train, y_test):
+ info_one_hot_encode = """
+ Performs one-hot encoding on y_train and y_test data..
+
+ Args:
+ y_train (numpy.ndarray): Training label data.
+ y_test (numpy.ndarray): Test label data.
+
+ Returns:
+ tuple: One-hot encoded y_train and y_test data.
+ """
+ try:
+ classes = np.unique(y_train)
+ class_count = len(classes)
+
+ class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+ y_train_encoded = np.zeros((y_train.shape[0], class_count))
+ for i, label in enumerate(y_train):
+ y_train_encoded[i, class_to_index[label]] = 1
+
+ y_test_encoded = np.zeros((y_test.shape[0], class_count))
+ for i, label in enumerate(y_test):
+ y_test_encoded[i, class_to_index[label]] = 1
+ except:
+ print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+
+ return y_train_encoded, y_test_encoded
+
+
+ def split(X, y, test_size, random_state):
+ """
+ Splits the given X (features) and y (labels) data into training and testing subsets.
+
+ Args:
+ X (numpy.ndarray): Features data.
+ y (numpy.ndarray): Labels data.
+ test_size (float or int): Proportion or number of samples for the test subset.
+ random_state (int or None): Seed for random state.
+
+ Returns:
+ tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
+ """
+ # Size of the dataset
+ num_samples = X.shape[0]
+
+ if isinstance(test_size, float):
+ test_size = int(test_size * num_samples)
+ elif isinstance(test_size, int):
+ if test_size > num_samples:
+ raise ValueError(
+ "test_size cannot be larger than the number of samples.")
+ else:
+ raise ValueError("test_size should be float or int.")
+
+ if random_state is not None:
+ np.random.seed(random_state)
+
+ indices = np.arange(num_samples)
+ np.random.shuffle(indices)
+
+ test_indices = indices[:test_size]
+ train_indices = indices[test_size:]
+
+ x_train, x_test = X[train_indices], X[test_indices]
+ y_train, y_test = y[train_indices], y[test_indices]
+
+ return x_train, x_test, y_train, y_test
+
+
+ def metrics(y_ts, test_preds):
+ """
+ Calculates precision, recall and F1 score for a classification task.
+
+ Args:
+ y_test (list or numpy.ndarray): True labels.
+ test_preds (list or numpy.ndarray): Predicted labels.
+
+ Returns:
+ tuple: Precision, recall, F1 score.
+ """
+ y_test_d = decode_one_hot(y_ts)
+ y_test_d = np.array(y_test_d)
+ y_pred = np.array(test_preds)
+
+ if y_test_d.ndim > 1:
+ y_test_d = y_test_d.reshape(-1)
+ if y_pred.ndim > 1:
+ y_pred = y_pred.reshape(-1)
+
+ tp = {}
+ fp = {}
+ fn = {}
+
+ classes = np.unique(np.concatenate((y_test_d, y_pred)))
+
+ for c in classes:
+ tp[c] = 0
+ fp[c] = 0
+ fn[c] = 0
+
+ for c in classes:
+ for true, pred in zip(y_test_d, y_pred):
+ if true == c and pred == c:
+ tp[c] += 1
+ elif true != c and pred == c:
+ fp[c] += 1
+ elif true == c and pred != c:
+ fn[c] += 1
+
+ precision = {}
+ recall = {}
+ f1 = {}
+
+ for c in classes:
+ precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+ recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+ f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
+ recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+ micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
+ list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+ micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
+ fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+ micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
+ micro_recall) if (micro_precision + micro_recall) > 0 else 0
+
+ return micro_precision, micro_recall, micro_f1
+
+
+ def decode_one_hot(encoded_data):
+ """
+ Decodes one-hot encoded data to original categorical labels.
+
+ Args:
+ encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
+
+ Returns:
+ numpy.ndarray: Decoded categorical labels with shape (n_samples,).
+ """
+
+ decoded_labels = np.argmax(encoded_data, axis=1)
+
+ return decoded_labels
+
+
+ def roc_curve(y_true, y_score):
+ """
+ Computes ROC curve.
+
+ Args:
+ y_true (numpy.ndarray): True class labels (binary: 0 or 1).
+ y_score (numpy.ndarray): Predicted probabilities for positive class.
+
+ Returns:
+ tuple: FPR (False Positive Rate), TPR (True Positive Rate), thresholds.
+ """
+
+ idx = np.argsort(y_score)[::-1]
+ y_score_sorted = y_score[idx]
+ y_true_sorted = y_true[idx]
+
+ tpr = []
+ fpr = []
+ thresholds = np.linspace(0, 1, 100)
+
+ for threshold in thresholds:
+ y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
+
+ tp = np.sum((y_true_sorted == 1) & (y_pred_binary == 1))
+ fn = np.sum((y_true_sorted == 1) & (y_pred_binary == 0))
+ tn = np.sum((y_true_sorted == 0) & (y_pred_binary == 0))
+ fp = np.sum((y_true_sorted == 0) & (y_pred_binary == 1))
+
+ # Check for division by zero
+ if (tp + fn) == 0:
+ tpr_value = 0.0
+ else:
+ tpr_value = tp / (tp + fn)
+
+ if (fp + tn) == 0:
+ fpr_value = 0.0
+ else:
+ fpr_value = fp / (fp + tn)
+
+ tpr.append(tpr_value)
+ fpr.append(fpr_value)
+
+ return fpr, tpr, thresholds
+
+
+ def confusion_matrix(y_true, y_pred, class_count):
+ """
+ Computes confusion matrix.
+
+ Args:
+ y_true (numpy.ndarray): True class labels (1D array).
+ y_pred (numpy.ndarray): Predicted class labels (1D array).
+ num_classes (int): Number of classes.
+
+ Returns:
+ numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
+ """
+ confusion = np.zeros((class_count, class_count), dtype=int)
+
+ for i in range(len(y_true)):
+ true_label = y_true[i]
+ pred_label = y_pred[i]
+ confusion[true_label, pred_label] += 1
+
+ return confusion
+
+
+ def plot_evaluate(y_test, y_preds, acc_list):
+
+ acc = acc_list[len(acc_list) - 1]
+ y_true = decode_one_hot(y_test)
+
+ y_true = np.array(y_true)
+ y_preds = np.array(y_preds)
+ fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+ precision, recall, f1 = metrics(y_test, y_preds)
+ Class = np.unique(y_test)
+
+ # Confusion matrix
+ cm = confusion_matrix(y_true, y_preds, len(Class))
+ # Arrange the four panels in subplots
+ fig, axs = plt.subplots(2, 2, figsize=(16, 12))
+
+ # Confusion Matrix
+ sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
+ axs[0, 0].set_title("Confusion Matrix")
+ axs[0, 0].set_xlabel("Predicted Class")
+ axs[0, 0].set_ylabel("Actual Class")
+
+ # ROC Curve
+ axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
+ axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+ axs[1, 0].set_xlim([0.0, 1.0])
+ axs[1, 0].set_ylim([0.0, 1.05])
+ axs[1, 0].set_xlabel('False Positive Rate')
+ axs[1, 0].set_ylabel('True Positive Rate')
+ axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+ axs[1, 0].legend(loc="lower right")
+
+ # Bar chart for precision, recall, F1 score and accuracy
+ metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
+ values = [precision, recall, f1, acc]
+ colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
+
+ # Create the bar chart
+ bars = axs[0, 1].bar(metric, values, color=colors)
+
+ # Write each value on top of its bar
+ for bar, value in zip(bars, values):
+ axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
+ ha='center', va='bottom', fontsize=12, color='white', weight='bold')
+
+ axs[0, 1].set_ylim(0, 1) # limit the y-axis to the range 0 to 1
+ axs[0, 1].set_xlabel('Metrics')
+ axs[0, 1].set_ylabel('Score')
+ axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+ axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
+
+ # Accuracy
+ plt.plot(acc_list, marker='o', linestyle='-',
+ color='r', label='Accuracy')
+
+ # Horizontal line marking the maximum reachable accuracy
+ plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
+
+ # Axis and title settings
+ plt.xlabel('Samples')
+ plt.ylabel('Accuracy')
+ plt.title('Accuracy History')
+ plt.legend()
+
+ # Lay out the subplots
+ plt.tight_layout()
+ plt.show()

  def get_weights():
-
+
  return 0
-
+
+
  def get_df():
-
- return 1
-
+
+ return 2
+
+
  def get_preds():
-
+
  return 1
-
+
+
  def get_acc():
-
- return 2
+
+ return 2
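Taken together, 2.1.4 turns plan_di into a small self-contained pipeline: `split` and `encode_one_hot` prepare the data, `auto_balancer`/`synthetic_augmentation` rebalance it, and `evaluate` feeds `metrics`, `roc_curve` and `confusion_matrix` into `plot_evaluate`. A hedged end-to-end sketch of how these pieces compose, with made-up random data and the import path assumed from the file location (`standard_scaler` can be folded in as well; note that with `x_test=None` it returns only the scaled training set):

    import numpy as np
    from plan_di import plan_di as plan

    X = np.random.rand(100, 4)         # hypothetical feature matrix
    y = np.random.randint(0, 2, 100)   # hypothetical integer labels

    x_train, x_test, y_train, y_test = plan.split(X, y, test_size=0.2, random_state=42)
    y_train, y_test = plan.encode_one_hot(y_train, y_test)

    trained_W = plan.fit(list(x_train), y_train)
    W, y_preds, acc = plan.evaluate(list(x_test), y_test, True, trained_W)
    precision, recall, f1 = plan.metrics(y_test, y_preds)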