pyerualjetwork 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan_di/plan_di.py CHANGED
@@ -3,9 +3,11 @@ Created on Thu Jun 12 00:00:00 2024
 
 @author: hasan can beydili
 """
+
+import pandas as pd
 import numpy as np
 import time
-from colorama import Fore,Style
+from colorama import Fore, Style
 from typing import List, Union
 import math
 from scipy.special import expit, softmax
@@ -13,11 +15,14 @@ import matplotlib.pyplot as plt
 import seaborn as sns
 
 # BUILD -----
+
+
 def fit(
     x_train: List[Union[int, float]],
-    y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
+    # At least two.. and one hot encoded
+    y_train: List[Union[int, float, str]],
 ) -> str:
-
+
     infoPLAN = """
     Creates and configures a PLAN model.
 
@@ -32,45 +37,43 @@ def fit(
     """
 
     if len(x_train) != len(y_train):
-        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit",infoPLAN)
-        return 'e'
-
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
+        return 'e'
+
     class_count = set()
     for sublist in y_train:
-
+
         class_count.add(tuple(sublist))
-
-
+
     class_count = list(class_count)
-
+
     y_train = [tuple(sublist) for sublist in y_train]
-
-    neurons = [len(class_count),len(class_count)]
-    layers = ['fex','cat']
-
+
+    neurons = [len(class_count), len(class_count)]
+    layers = ['fex', 'cat']
+
     x_train[0] = np.array(x_train[0])
     x_train[0] = x_train[0].ravel()
     x_train_size = len(x_train[0])
-
-    W = weight_identification(len(layers) - 1,len(class_count),neurons,x_train_size)
-    Divides, Piece = synaptic_dividing(len(class_count),W)
+
+    W = weight_identification(
+        len(layers) - 1, len(class_count), neurons, x_train_size)
+    Divides, Piece = synaptic_dividing(len(class_count), W)
     trained_W = [1] * len(W)
-    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
-    train_predictions = [None] * len(y_train)
-    true = 0
+    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
     start_time = time.time()
     for index, inp in enumerate(x_train):
         uni_start_time = time.time()
         inp = np.array(inp)
         inp = inp.ravel()
-
+
         if x_train_size != len(inp):
-            print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",infoPLAN + Style.RESET_ALL)
+            print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
+                  infoPLAN + Style.RESET_ALL)
             return 'e'
-
-
+
         for Ulindex, Ul in enumerate(class_count):
-
+
             if Ul == y_train[index]:
                 for Windex, w in enumerate(W):
                     for i, ul in enumerate(Ul):
@@ -79,85 +82,63 @@ def fit(
 
                             cs = Divides[int(k)][Windex][0]
 
-
-                            W[Windex] = synaptic_pruning(w, cs, 'row', int(k),len(class_count),Piece[Windex],1)
+                            W[Windex] = synaptic_pruning(w, cs, 'row', int(
+                                k), len(class_count), Piece[Windex], True)
 
         neural_layer = inp
-
+
         for Lindex, Layer in enumerate(layers):
-
-
+
             neural_layer = normalization(neural_layer)
-
 
+            y = np.argmax(y_train[index])
             if Layer == 'fex':
-                y = np.argmax(y_train[index])
-
-                neural_layer,W[Lindex] = fex(neural_layer, W[Lindex], y , 1)
+                W[Lindex] = fex(neural_layer, W[Lindex], True, y)
             elif Layer == 'cat':
-                neural_layer,W[Lindex] = cat(neural_layer, W[Lindex], 1, Piece[Windex])
-
-        RealOutput = np.argmax(y_train[index])
-        PredictedOutput = np.argmax(neural_layer)
-        if RealOutput == PredictedOutput:
-            true += 1
-        acc = true / len(y_train)
-        train_predictions[index] = PredictedOutput
-
+                W[Lindex] = cat(neural_layer, W[Lindex], True, y)
+
         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
 
-
-        W = weight_identification(len(layers) - 1, len(class_count), neurons, x_train_size)
-
-
+        W = weight_identification(
+            len(layers) - 1, len(class_count), neurons, x_train_size)
+
         uni_end_time = time.time()
-
-        calculating_est = round((uni_end_time - uni_start_time) * (len(x_train) - index),3)
-
+
+        calculating_est = round(
+            (uni_end_time - uni_start_time) * (len(x_train) - index), 3)
+
         if calculating_est < 60:
-            print('\rest......(sec):',calculating_est,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+            print('\rest......(sec):', calculating_est, '\n', end="")
+
         elif calculating_est > 60 and calculating_est < 3600:
-            print('\rest......(min):',calculating_est/60,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+            print('\rest......(min):', calculating_est/60, '\n', end="")
+
         elif calculating_est > 3600:
-            print('\rest......(h):',calculating_est/3600,'\n',end= "")
-            print('\rTrain accuracy: ' ,acc ,"\n", end="")
-
+            print('\rest......(h):', calculating_est/3600, '\n', end="")
+
+        print('\rTraining: ', index, "/", len(x_train), "\n", end="")
+
     EndTime = time.time()
-
-    calculating_est = round(EndTime - start_time,2)
-
-    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")
-
+
+    calculating_est = round(EndTime - start_time, 2)
+
+    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n" + Style.RESET_ALL)
+
     if calculating_est < 60:
-        print('Total training time(sec): ',calculating_est)
-
+        print('Total training time(sec): ', calculating_est)
+
     elif calculating_est > 60 and calculating_est < 3600:
-        print('Total training time(min): ',calculating_est/60)
-
+        print('Total training time(min): ', calculating_est/60)
+
     elif calculating_est > 3600:
-        print('Total training time(h): ',calculating_est/3600)
-
-    if acc > 0.8:
-        print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.8 and acc > 0.6:
-        print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-    elif acc < 0.6:
-        print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
-
-
-
+        print('Total training time(h): ', calculating_est/3600)
+
+    return trained_W
 
-    return trained_W,train_predictions,acc
-
 # FUNCTIONS -----
 
+
 def weight_identification(
     layer_count, # int: Number of layers in the neural network.
     class_count, # int: Number of classes in the classification task.
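
Note on the hunk above — a minimal call-site sketch (ours, not from the package): 2.1.1's fit returned three values and printed a running train accuracy, while 2.1.3 returns only the trained weight list and reports progress as "Training: index/len". Assuming x and y are any already one-hot-encoded dataset:

    from plan_di import fit

    # 2.1.1 call site:
    # trained_W, train_predictions, acc = fit(x, y)

    # 2.1.3 call site: only the weight list comes back; accuracy is now
    # computed at test time by evaluate().
    trained_W = fit(x, y)
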
@@ -177,25 +158,26 @@ def weight_identification(
     list([numpy_arrays],[...]): pretrained weight matrices of the model.
     """
 
-
     Wlen = layer_count + 1
     W = [None] * Wlen
-    W[0] = np.ones((neurons[0],x_train_size))
+    W[0] = np.ones((neurons[0], x_train_size))
     ws = layer_count - 1
     for w in range(ws):
-        W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
-    W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
+        W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
+    W[layer_count] = np.ones((class_count, neurons[layer_count - 1]))
     return W
 
+
 def synaptic_pruning(
     w, # num: Weight matrix of the neural network.
     cs, # int: cs = cut_start, Synaptic connections between neurons.
     key, # int: key for identifying synaptic connections.
     Class, # int: Class label for the current training instance.
-    class_count, # int: Total number of classes in the dataset.
-    piece, # int: Which set of neurons will information be transferred to?
-    is_training # int: 1 or 0
-
+    class_count, # int: Total number of classes in the dataset.
+    piece, # int: Which set of neurons will information be transferred to?
+    # bool: Flag indicating if the function is called during training (True or False).
+    is_training
+
 ) -> str:
     infoPruning = """
     Performs synaptic pruning in a neural network model.
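
For orientation, a toy illustration (ours) of what weight_identification allocates after the cleanup above. With the two-layer ['fex', 'cat'] topology that fit builds, layer_count - 1 == 1, so the call yields two all-ones matrices:

    import numpy as np

    class_count, x_train_size = 3, 10
    neurons = [class_count, class_count]

    # W = weight_identification(1, class_count, neurons, x_train_size) gives:
    W = [np.ones((neurons[0], x_train_size)),   # 'fex' layer: shape (3, 10)
         np.ones((class_count, neurons[0]))]    # 'cat' layer: shape (3, 3)
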
@@ -207,53 +189,50 @@ def synaptic_pruning(
     Class (int): Class label for the current training instance.
     class_count (int): Total number of classes in the dataset.
     piece (int): Which set of neurons will information be transferred to?
-    is_training (int): 1 or 0
+    is_training (bool): Flag indicating if the function is called during training (True or False).
 
     Returns:
     numpy array: Weight matrix.
     """
-
-
-    Class += 1 # because index start 0
-
-    if Class != 1:
-
-
-
-        ce = cs / Class # ce(cut_end) = cs(cut_start) / current_class
-
-        if is_training == 1:
-
+
+    Class += 1  # because index start 0
+
+    if Class != 1:
+
+        ce = cs / Class  # ce(cut_end) = cs(cut_start) / current_class
+
+        if is_training == True:
+
             p = piece
-
+
             for i in range(Class - 3):
-
-                piece+=p
-
-            if Class!= 2:
-                ce += piece
-
-            w[int(ce)-1::-1,:] = 0
-
-
-        w[cs:,:] = 0
+
+                piece += p
+
+            if Class != 2:
+                ce += piece
+
+            w[int(ce)-1::-1, :] = 0
+
+        w[cs:, :] = 0
 
     else:
-
-        if key == 'row':
-
-            w[cs:,:] = 0
-
-        elif key == 'col':
-
-            w[:,cs] = 0
-
-        else:
-            print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning)
-            return 'e'
-
+
+        if key == 'row':
+
+            w[cs:, :] = 0
+
+        elif key == 'col':
+
+            w[:, cs] = 0
+
+        else:
+            print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning)
+            return 'e'
+
     return w
 
+
 def synaptic_dividing(
     class_count, # int: Total number of classes in the dataset.
     W # list[num]: Weight matrix of the neural network.
@@ -269,35 +248,33 @@ def synaptic_dividing(
     list: a 3D list holding information about the divided network and the list of neuron groups separated by classes.
     """
 
-
     Piece = [1] * len(W)
-
+
     Divides = [[[0] for _ in range(len(W))] for _ in range(class_count)]
-
-
+
     for i in range(len(W)):
-
 
-            Piece[i] = int(math.floor(W[i].shape[0] / class_count))
+        Piece[i] = int(math.floor(W[i].shape[0] / class_count))
 
-            cs = 0
+    cs = 0
 
     for i in range(len(W)):
         for j in range(class_count):
             cs = cs + Piece[i]
             Divides[j][i][0] = cs
-
+
         j = 0
         cs = 0
-
+
     return Divides, Piece
-
+
 
 def fex(
     Input, # list[num]: Input data.
-    w, # num: Weight matrix of the neural network.,
-    Class, # int: Which class is, if training.
-    is_training # int: 1 or 0
+    w, # num: Weight matrix of the neural network.
+    # bool: Flag indicating if the function is called during training (True or False).
+    is_training,
+    Class # int: Which class is, if training.
 ) -> tuple:
     """
     Applies feature extraction process to the input data using synaptic pruning.
@@ -305,50 +282,56 @@ def fex(
     Args:
     Input (num): Input data.
     w (num): Weight matrix of the neural network.
-    Class (int): Which class is, if training.
-    is_training (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): If called during training, the class (label) index; otherwise pass None.
 
     Returns:
     tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
-
-    if is_training == 1:
-
-        w[Class,:] = Input
 
-    neural_layer = np.dot(w, Input)
-
-    return neural_layer,w
+    if is_training == True:
+
+        w[Class, :] = Input
+
+        return w
+
+    else:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
+
 
 def cat(
     Input, # list[num]: Input data.
     w, # list[num]: Weight matrix of the neural network.
-    is_training, # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
-    piece # int Which set of neurons will information be transferred to?
+    # (bool): Flag indicating if the function is called during training (True or False).
+    is_training,
+    Class
 ) -> tuple:
     """
     Applies categorization process to the input data using synaptic pruning if specified.
 
     Args:
     Input (list[num]): Input data.
-    w (num): Weight matrix of the neural network.
-    is_training (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
-    piece (int): Which set of neurons will information be transferred to?
-    ) -> tuple:
+    w (list[num]): Weight matrix of the neural network.
+    is_training (bool): Flag indicating if the function is called during training (True or False).
+    Class (int): If called during training, the class (label) index; otherwise pass None.
     Returns:
     tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
     """
-
-    PruneIndex = np.where(Input == 0)
-
-    if is_training == 1:
-
-        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, is_training)
-
-
-    neural_layer = np.dot(w, Input)
-
-    return neural_layer,w
+
+    if is_training == True:
+
+        w[Class, Class] += 1
+
+        return w
+
+    else:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
 
 
 def normalization(
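
The hunk above changes both the argument order and the return type of fex/cat: both now take (Input, w, is_training, Class), return only the updated weight matrix during training, and only the layer output at inference. A small sketch of the two modes (ours; the toy shapes are assumptions):

    import numpy as np

    w = np.ones((3, 5))               # (class_count, input_size)
    x = np.random.rand(5)

    w = fex(x, w, True, 1)            # training: row 1 of w is overwritten with x
    scores = fex(x, w, False, None)   # inference: returns np.dot(w, x)
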
@@ -364,13 +347,12 @@ def normalization(
     (num) Scaled input data after normalization.
     """
 
-
     AbsVector = np.abs(Input)
-
+
     MaxAbs = np.max(AbsVector)
-
+
     ScaledInput = Input / MaxAbs
-
+
     return ScaledInput
 
 
@@ -386,7 +368,7 @@ def Softmax(
     Returns:
     (num): Transformed data after applying softmax function.
     """
-
+
     return softmax(x)
 
 
@@ -418,150 +400,262 @@ def Relu(
     (num): Transformed data after applying ReLU function.
     """
 
-
     return np.maximum(0, x)
 
 
-
-
 def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
-    visualize, # str: visualize Testing procces or not visualize ('y' or 'n')
+    show_metrices, # show_metrices (bool): (True or False)
     W # list[num]: Weight matrix list of the neural network.
 ) -> tuple:
-    infoTestModel = """
+    infoTestModel = """
     Tests the neural network model with the given test data.
 
     Args:
     x_test (list[num]): Test input data.
     y_test (list[num]): Test labels.
-    activation_potential (float): Input activation potential
-    visualize (str): Visualize test progress ? ('y' or 'n')
+    show_metrices (bool): (True or False)
     W (list[num]): Weight matrix list of the neural network.
 
     Returns:
     tuple: A tuple containing the predicted labels and the accuracy of the model.
     """
-
-    layers = ['fex','cat']
 
+    layers = ['fex', 'cat']
 
-    try:
-        Wc = [0] * len(W) # Wc = Weight copy
-        true = 0
-        TestPredictions = [None] * len(y_test)
-        for i, w in enumerate(W):
-            Wc[i] = np.copy(w)
-            print('\rCopying weights.....',i+1,'/',len(W),end = "")
-
-        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
-        start_time = time.time()
-        for inpIndex,Input in enumerate(x_test):
-            Input = np.array(Input)
-            Input = Input.ravel()
-            uni_start_time = time.time()
-            neural_layer = Input
+    try:
+        Wc = [0] * len(W)  # Wc = Weight copy
+        true = 0
+        y_preds = [-1] * len(y_test)
+        acc_list = []
 
-            for index, Layer in enumerate(layers):
-
-                neural_layer = normalization(neural_layer)
-
-                if layers[index] == 'fex':
-                    neural_layer = fex(neural_layer, W[index], 0, 0)[0]
-                if layers[index] == 'cat':
-                    neural_layer = cat(neural_layer, W[index], 0, 0)[0]
-
+        for i, w in enumerate(W):
+            Wc[i] = np.copy(w)
+            print('\rCopying weights.....', i+1, '/', len(W), end="")
+
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        for inpIndex, Input in enumerate(x_test):
+            Input = np.array(Input)
+            Input = Input.ravel()
+            uni_start_time = time.time()
+            neural_layer = Input
+
+            for index, Layer in enumerate(layers):
+
+                neural_layer = normalization(neural_layer)
+
+                if Layer == 'fex':
+                    neural_layer = fex(neural_layer, W[index], False, None)
+                elif Layer == 'cat':
+                    neural_layer = cat(neural_layer, W[index], False, None)
+
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(neural_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            if show_metrices == True:
+                acc_list.append(acc)
+            y_preds[inpIndex] = PredictedOutput
+
+            uni_end_time = time.time()
+
+            calculating_est = round(
+                (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):', calculating_est, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):', calculating_est/60, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):', calculating_est/3600, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+        if show_metrices == True:
+            plot_evaluate(y_test, y_preds, acc_list)
+
+        EndTime = time.time()
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
-            RealOutput = np.argmax(y_test[inpIndex])
-            PredictedOutput = np.argmax(neural_layer)
-            if RealOutput == PredictedOutput:
-                true += 1
-            acc = true / len(y_test)
-            TestPredictions[inpIndex] = PredictedOutput
-
-            if visualize == 'y':
-
-                y_testVisual = np.copy(y_test)
-                y_testVisual = np.argmax(y_testVisual, axis=1)
-
-                plt.figure(figsize=(12, 6))
-                sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
-                sns.kdeplot(TestPredictions, label='Predictions', fill=True)
-                plt.legend()
-                plt.xlabel('Class')
-                plt.ylabel('Data size')
-                plt.title('Predictions and Real Outputs for Testing KDE Plot')
-                plt.show()
-
-                if inpIndex + 1 != len(x_test):
-
-                    plt.close('all')
-
-            uni_end_time = time.time()
-
-            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
-
+
+        calculating_est = round(EndTime - start_time, 2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
         if calculating_est < 60:
-            print('\rest......(sec):',calculating_est,'\n',end= "")
-            print('\rTest accuracy: ' ,acc ,"\n", end="")
-
+            print('Total testing time(sec): ', calculating_est)
+
         elif calculating_est > 60 and calculating_est < 3600:
-            print('\rest......(min):',calculating_est/60,'\n',end= "")
-            print('\rTest accuracy: ' ,acc ,"\n", end="")
-
-        elif calculating_est > 3600:
-            print('\rest......(h):',calculating_est/3600,'\n',end= "")
-            print('\rTest accuracy: ' ,acc ,"\n", end="")
-
-        EndTime = time.time()
-        for i, w in enumerate(Wc):
-            W[i] = np.copy(w)
-
-        calculating_est = round(EndTime - start_time,2)
+            print('Total testing time(min): ', calculating_est/60)
 
-        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+        elif calculating_est > 3600:
+            print('Total testing time(h): ', calculating_est/3600)
 
-        if calculating_est < 60:
-            print('Total testing time(sec): ',calculating_est)
-
-        elif calculating_est > 60 and calculating_est < 3600:
-            print('Total testing time(min): ',calculating_est/60)
-
-        elif calculating_est > 3600:
-            print('Total testing time(h): ',calculating_est/3600)
-
-        if acc >= 0.8:
-            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
 
-        elif acc < 0.8 and acc > 0.6:
-            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
 
-        elif acc <= 0.6:
-            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+        elif acc <= 0.6:
+            print(Fore.RED + '\nTotal Test accuracy: ', acc, '\n' + Style.RESET_ALL)
 
+    except:
+
+        print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
+              infoTestModel + Style.RESET_ALL)
+        return 'e'
+
+    return W, y_preds, acc
+
+
+def multiple_evaluate(
+    x_test, # list[num]: Test input data.
+    y_test, # list[num]: Test labels.
+    show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
+    MW # list[list[num]]: Weight matrix of the neural network.
+) -> tuple:
+    infoTestModel = """
+    Tests the neural network model with the given test data.
+
+    Args:
+    x_test (list[num]): Test input data.
+    y_test (list[num]): Test labels.
+    show_metrices (bool): (True or False)
+    MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
+
+    Returns:
+    tuple: A tuple containing the predicted labels and the accuracy of the model.
+    """
+
+    layers = ['fex', 'cat']
+
+    try:
+        y_preds = [-1] * len(y_test)
+        acc_list = []
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        true = 0
+        for inpIndex, Input in enumerate(x_test):
+
+            output_layer = 0
+
+            for m, Model in enumerate(MW):
+
+                W = Model
+
+                Wc = [0] * len(W)  # Wc = weight copy
+
+                y_preds = [None] * len(y_test)
+                for i, w in enumerate(W):
+                    Wc[i] = np.copy(w)
+
+                Input = np.array(Input)
+                Input = Input.ravel()
+                uni_start_time = time.time()
+                neural_layer = Input
+
+                for index, Layer in enumerate(layers):
+
+                    neural_layer = normalization(neural_layer)
+
+                    if Layer == 'fex':
+                        neural_layer = fex(neural_layer, W[index], False, None)
+                    elif Layer == 'cat':
+                        neural_layer = cat(neural_layer, W[index], False, None)
+
+                output_layer += neural_layer
+
+                for i, w in enumerate(Wc):
+                    W[i] = np.copy(w)
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(output_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            if show_metrices == True:
+                acc_list.append(acc)
+            y_preds[inpIndex] = PredictedOutput
+
+
+            uni_end_time = time.time()
+
+            calculating_est = round(
+                (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):', calculating_est, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):', calculating_est/60, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):', calculating_est/3600, '\n', end="")
+                print('\rTest accuracy: ', acc, "\n", end="")
+        if show_metrices == True:
+            plot_evaluate(y_test, y_preds, acc_list)
 
-
-    except:
-
-        print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+
+        calculating_est = round(EndTime - start_time, 2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ', calculating_est)
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ', calculating_est/60)
+
+        elif calculating_est > 3600:
+            print('Total testing time(h): ', calculating_est/3600)
+
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
+
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED + '\nTotal Test accuracy: ',
+                  acc, '\n' + Style.RESET_ALL)
+
+    except:
+
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
         return 'e'
-
 
-
-    return W,TestPredictions,acc
+    return W, y_preds, acc
+
 
 def save_model(model_name,
-               model_type,
-               class_count,
-               test_acc,
-               weights_type,
-               weights_format,
-               model_path,
-               W
-               ):
-
+               model_type,
+               class_count,
+               test_acc,
+               weights_type,
+               weights_format,
+               model_path,
+               scaler,
+               W
+               ):
+
     infosave_model = """
     Function to save a pruning learning model.
 
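
A hedged usage sketch (ours) for the reworked test path above: the visualize='y'/'n' flag becomes a boolean show_metrices that feeds the new plot_evaluate, and multiple_evaluate sums the output layers of several weight lists before the argmax. Assuming one-hot y_test and weight lists W_a, W_b from earlier fit calls:

    W, y_preds, acc = evaluate(x_test, y_test, True, trained_W)

    # Ensemble of two trained weight lists (hypothetical variables):
    W, y_preds, acc = multiple_evaluate(x_test, y_test, False, [W_a, W_b])
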
@@ -574,40 +668,44 @@ def save_model(model_name,
     weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
     WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+    scaler (bool): Was the training data scaled with standard_scaler? (True or False)
     W: Weights of the model.
 
     Returns:
     str: Message indicating if the model was saved successfully or encountered an error.
     """
-
+
     # Operations to be performed by the function will be written here
     pass
 
-    layers = ['fex','cat']
+    layers = ['fex', 'cat']
 
-    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
-        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" + infosave_model + Style.RESET_ALL)
+    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
+        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
-
-    if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
-        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + infosave_model + Style.RESET_ALL)
+
+    if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
+        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
-
+
     NeuronCount = 0
     SynapseCount = 0
-
+
     try:
         for w in W:
             NeuronCount += np.shape(w)[0]
             SynapseCount += np.shape(w)[0] * np.shape(w)[1]
     except:
-
-        print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + infosave_model + Style.RESET_ALL)
+
+        print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
     import pandas as pd
     from datetime import datetime
     from scipy import io
-
+
     data = {'MODEL NAME': model_name,
             'MODEL TYPE': model_type,
             'LAYERS': layers,
@@ -619,96 +717,96 @@ def save_model(model_name,
             'SAVE DATE': datetime.now(),
             'WEIGHTS TYPE': weights_type,
             'WEIGHTS FORMAT': weights_format,
-            'MODEL PATH': model_path
+            'MODEL PATH': model_path,
+            'STANDARD SCALER': scaler
             }
     try:
-
+
         df = pd.DataFrame(data)
 
-
         df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
-
 
     except:
-
-        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" + infosave_model + Style.RESET_ALL)
+
+        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
+              infosave_model + Style.RESET_ALL)
         return 'e'
     try:
-
+
         if weights_type == 'txt' and weights_format == 'd':
-
+
             for i, w in enumerate(W):
-                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%d')
-
+                np.savetxt(model_path + model_name +
+                           str(i+1) + 'w.txt', w, fmt='%d')
+
         if weights_type == 'txt' and weights_format == 'f':
-
+
             for i, w in enumerate(W):
-                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%f')
-
+                np.savetxt(model_path + model_name +
+                           str(i+1) + 'w.txt', w, fmt='%f')
+
         if weights_type == 'txt' and weights_format == 'raw':
-
+
             for i, w in enumerate(W):
-                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w)
-
-
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
+
         ###
-
-
+
         if weights_type == 'npy' and weights_format == 'd':
-
+
             for i, w in enumerate(W):
-                np.save(model_path + model_name + str(i+1) + 'w.npy', w.astype(int))
-
+                np.save(model_path + model_name +
+                        str(i+1) + 'w.npy', w.astype(int))
+
         if weights_type == 'npy' and weights_format == 'f':
-
+
             for i, w in enumerate(W):
-                np.save(model_path + model_name + str(i+1) + 'w.npy' , w, w.astype(float))
-
+                np.save(model_path + model_name + str(i+1) +
+                        'w.npy', w, w.astype(float))
+
         if weights_type == 'npy' and weights_format == 'raw':
-
+
             for i, w in enumerate(W):
-                np.save(model_path + model_name + str(i+1) + 'w.npy' , w)
-
-
+                np.save(model_path + model_name + str(i+1) + 'w.npy', w)
+
         ###
-
-
+
         if weights_type == 'mat' and weights_format == 'd':
-
+
             for i, w in enumerate(W):
                 w = {'w': w.astype(int)}
                 io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
+
         if weights_type == 'mat' and weights_format == 'f':
-
+
             for i, w in enumerate(W):
                 w = {'w': w.astype(float)}
                 io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
+
         if weights_type == 'mat' and weights_format == 'raw':
-
+
             for i, w in enumerate(W):
                 w = {'w': w}
                 io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
+
     except:
-
+
         print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
         return 'e'
     print(df)
     message = (
         Fore.GREEN + "Model Saved Successfully\n" +
-        Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
+        Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
         Style.RESET_ALL
-    )
-
+    )
+
     return print(message)
 
 
 def load_model(model_name,
-               model_path,
-               ):
-    infoload_model = """
+               model_path,
+               ):
+    infoload_model = """
     Function to load a pruning learning model.
 
     Arguments:
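
The model log now records a 'STANDARD SCALER' column, so a 2.1.3 save must pass the new scaler flag between model_path and W. A hypothetical call (all argument values here are our own example data, not from the package):

    save_model(model_name='my_plan', model_type='PLAN', class_count=3,
               test_acc=acc, weights_type='npy', weights_format='raw',
               model_path='', scaler=False, W=trained_W)
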
@@ -718,50 +816,43 @@ def load_model(model_name,
     Returns:
     lists: W(list[num]), activation_potential, DataFrame of the model
     """
-    pass
-
-
-    import pandas as pd
-    import scipy.io as sio
-
-    try:
-
-        df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
-
-    except:
-
-        print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + infoload_model + Style.RESET_ALL)
-
-    model_name = str(df['MODEL NAME'].iloc[0])
-    layers = df['LAYERS'].tolist()
-    layer_count = int(df['LAYER COUNT'].iloc[0])
-    class_count = int(df['CLASS COUNT'].iloc[0])
-    NeuronCount = int(df['NEURON COUNT'].iloc[0])
-    SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
-    test_acc = int(df['TEST ACCURACY'].iloc[0])
-    model_type = str(df['MODEL TYPE'].iloc[0])
-    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
-    WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
-    model_path = str(df['MODEL PATH'].iloc[0])
-
-    W = [0] * layer_count
-
-    if WeightType == 'txt':
-        for i in range(layer_count):
-            W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
-    elif WeightType == 'npy':
-        for i in range(layer_count):
-            W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
-    elif WeightType == 'mat':
-        for i in range(layer_count):
-            W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
-    else:
-        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
-    print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
-    return W,df
-
-def predict_model_ssd(Input,model_name,model_path):
-
+    pass
+
+    import scipy.io as sio
+
+    try:
+
+        df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
+
+    except:
+
+        print(Fore.RED + "ERROR: Model Path error. acceptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
+              infoload_model + Style.RESET_ALL)
+
+    model_name = str(df['MODEL NAME'].iloc[0])
+    layer_count = int(df['LAYER COUNT'].iloc[0])
+    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+
+    W = [0] * layer_count
+
+    if WeightType == 'txt':
+        for i in range(layer_count):
+            W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
+    elif WeightType == 'npy':
+        for i in range(layer_count):
+            W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
+    elif WeightType == 'mat':
+        for i in range(layer_count):
+            W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
+    else:
+        raise ValueError(
+            Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
+    print(Fore.GREEN + "Model loaded successfully" + Style.RESET_ALL)
+    return W, df
+
+
+def predict_model_ssd(Input, model_name, model_path):
+
     infopredict_model_ssd = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
 
@@ -772,10 +863,16 @@ def predict_model_ssd(Input,model_name,model_path):
     Returns:
     ndarray: Output from the model.
     """
-    W = load_model(model_name,model_path)[0]
-
-    layers = ['fex','cat']
-
+    W, df = load_model(model_name, model_path)
+
+    scaler = str(df['STANDARD SCALER'].iloc[0])
+
+    if scaler == 'True':
+
+        Input = standard_scaler(Input, None)
+
+    layers = ['fex', 'cat']
+
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
@@ -783,39 +880,44 @@ def predict_model_ssd(Input,model_name,model_path):
         neural_layer = Input
         neural_layer = np.array(neural_layer)
         neural_layer = neural_layer.ravel()
-        for index, Layer in enumerate(layers):
+        for index, Layer in enumerate(layers):
 
             neural_layer = normalization(neural_layer)
-
-            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index],0, 0)[0]
-            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None)
+            elif Layer == 'cat':
+                neural_layer = cat(neural_layer, W[index], False, None)
     except:
-        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
-        return 'e'
+        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
+              infopredict_model_ssd + Style.RESET_ALL)
+        return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
     return neural_layer
 
 
-def predict_model_ram(Input,W):
-
+def predict_model_ram(Input, scaler, W):
+
     infopredict_model_ram = """
     Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
     from weights and parameters stored in memory.
 
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
-    activation_potential (float): Activation potential.
+    scaler (bool): Was the training data scaled with standard_scaler? (True or False)
     W (list of ndarrays): Weights of the model.
 
     Returns:
     ndarray: Output from the model.
     """
-
-    layers = ['fex','cat']
-
+
+    if scaler == True:
+
+        Input = standard_scaler(Input, None)
+
+    layers = ['fex', 'cat']
+
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
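
A prediction sketch (ours). After this change, predict_model_ssd reads the 'STANDARD SCALER' flag back from the saved model log and rescales the input itself, while predict_model_ram takes the flag explicitly as its new second argument; sample stands for any single input vector:

    out = predict_model_ssd(sample, 'my_plan', '')      # model_path='' -> current directory
    out = predict_model_ram(sample, False, trained_W)   # scaler flag passed by hand
    predicted_class = np.argmax(out)
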
@@ -823,151 +925,452 @@ def predict_model_ram(Input,W):
         neural_layer = Input
         neural_layer = np.array(neural_layer)
         neural_layer = neural_layer.ravel()
-        for index, Layer in enumerate(layers):
+        for index, Layer in enumerate(layers):
 
             neural_layer = normalization(neural_layer)
-
-            if layers[index] == 'fex':
-                neural_layer = fex(neural_layer, W[index],0, 0)[0]
-            if layers[index] == 'cat':
-                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
-
+
+            if Layer == 'fex':
+                neural_layer = fex(neural_layer, W[index], False, None)
+            elif Layer == 'cat':
+                neural_layer = cat(neural_layer, W[index], False, None)
+
     except:
-        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
+              infopredict_model_ram + Style.RESET_ALL)
         return 'e'
     for i, w in enumerate(Wc):
         W[i] = np.copy(w)
     return neural_layer
-
 
-def auto_balancer(x_train, y_train, class_count):
-
-    infoauto_balancer = """
+
+def auto_balancer(x_train, y_train):
+
+    infoauto_balancer = """
     Function to balance the training data across different classes.
 
     Arguments:
     x_train (list): Input data for training.
     y_train (list): Labels corresponding to the input data.
-    class_count (int): Number of classes.
 
     Returns:
     tuple: A tuple containing balanced input data and labels.
     """
-    try:
-        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[0] for i in range(class_count)}
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
+    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
+            0] for i in range(class_count)}
         classes = [len(ClassIndices[i]) for i in range(class_count)]
-
+
         if len(set(classes)) == 1:
-            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
             return x_train, y_train
-
+
         MinCount = min(classes)
-
+
         BalancedIndices = []
        for i in range(class_count):
             if len(ClassIndices[i]) > MinCount:
-                SelectedIndices = np.random.choice(ClassIndices[i], MinCount, replace=False)
+                SelectedIndices = np.random.choice(
+                    ClassIndices[i], MinCount, replace=False)
             else:
                 SelectedIndices = ClassIndices[i]
             BalancedIndices.extend(SelectedIndices)
-
+
         BalancedInputs = [x_train[idx] for idx in BalancedIndices]
         BalancedLabels = [y_train[idx] for idx in BalancedIndices]
-
-        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
-    except:
+
+        print(Fore.GREEN + "All Training Data Successfully Balanced from: " + str(len(x_train)
+              ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+    except:
         print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
         return 'e'
-
-    return BalancedInputs, BalancedLabels
-
-def synthetic_augmentation(x, y, class_count):
+
+    return BalancedInputs, BalancedLabels
+
+
+def synthetic_augmentation(x_train, y_train):
     """
     Generates synthetic examples to balance classes with fewer examples.
-
+
     Arguments:
     x -- Input dataset (examples) - list format
     y -- Class labels (one-hot encoded) - list format
-    class_count -- Number of classes
-
+
     Returns:
     x_balanced -- Balanced input dataset (list format)
     y_balanced -- Balanced class labels (one-hot encoded, list format)
     """
+    x = x_train
+    y = y_train
+    classes = np.arange(y_train.shape[1])
+    class_count = len(classes)
+
     # Calculate class distribution
     class_distribution = {i: 0 for i in range(class_count)}
     for label in y:
         class_distribution[np.argmax(label)] += 1
-
+
     max_class_count = max(class_distribution.values())
-
+
     x_balanced = list(x)
     y_balanced = list(y)
-
+
     for class_label in range(class_count):
-        class_indices = [i for i, label in enumerate(y) if np.argmax(label) == class_label]
+        class_indices = [i for i, label in enumerate(
+            y) if np.argmax(label) == class_label]
         num_samples = len(class_indices)
-
+
         if num_samples < max_class_count:
             while num_samples < max_class_count:
 
-                random_indices = np.random.choice(class_indices, 2, replace=False)
+                random_indices = np.random.choice(
+                    class_indices, 2, replace=False)
                 sample1 = x[random_indices[0]]
                 sample2 = x[random_indices[1]]
-
-                synthetic_sample = sample1 + (np.array(sample2) - np.array(sample1)) * np.random.rand()
-
+
+                synthetic_sample = sample1 + \
+                    (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
                 x_balanced.append(synthetic_sample.tolist())
                 y_balanced.append(y[class_indices[0]])
-
+
                 num_samples += 1
-
+
     return np.array(x_balanced), np.array(y_balanced)
 
+
 def standard_scaler(x_train, x_test):
-    info_standard_scaler = """
-    Standardizes training and test datasets.
+    info_standard_scaler = """
+    Standardizes training and test datasets. x_test may be None.
 
     Args:
     train_data: numpy.ndarray
-        Training data (n_samples, n_features)
+        Training data
     test_data: numpy.ndarray
-        Test data (n_samples, n_features)
+        Test data
 
     Returns:
     tuple
         Standardized training and test datasets
     """
-    try:
-        mean = np.mean(x_train, axis=0)
-        std = np.std(x_train, axis=0)
-
-
-        train_data_scaled = (x_train - mean) / std
-        test_data_scaled = (x_test - mean) / std
+    try:
 
-    except:
-        print(Fore.RED + "ERROR: x_train and x_test must be numpy array from standard_scaler" + info_standard_scaler)
-
-    return train_data_scaled, test_data_scaled
+        mean = np.mean(x_train, axis=0)
+        std = np.std(x_train, axis=0)
+
+        if x_test == None:
+
+            train_data_scaled = (x_train - mean) / std
+            return train_data_scaled
+
+        else:
+            train_data_scaled = (x_train - mean) / std
+            test_data_scaled = (x_test - mean) / std
+            return train_data_scaled, test_data_scaled
+
+    except:
+        print(
+            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+
+
+def encode_one_hot(y_train, y_test):
+    info_one_hot_encode = """
+    Performs one-hot encoding on y_train and y_test data.
+
+    Args:
+    y_train (numpy.ndarray): Training label data.
+    y_test (numpy.ndarray): Test label data.
+
+    Returns:
+    tuple: One-hot encoded y_train and y_test data.
+    """
+    try:
+        classes = np.unique(y_train)
+        class_count = len(classes)
+
+        class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+        y_train_encoded = np.zeros((y_train.shape[0], class_count))
+        for i, label in enumerate(y_train):
+            y_train_encoded[i, class_to_index[label]] = 1
+
+        y_test_encoded = np.zeros((y_test.shape[0], class_count))
+        for i, label in enumerate(y_test):
+            y_test_encoded[i, class_to_index[label]] = 1
+    except:
+        print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+
+    return y_train_encoded, y_test_encoded
+
+
+def split(X, y, test_size=0.2, random_state=None):
+    """
+    Splits the given X (features) and y (labels) data into training and testing subsets.
+
+    Args:
+    X (numpy.ndarray): Features data.
+    y (numpy.ndarray): Labels data.
+    test_size (float or int): Proportion or number of samples for the test subset.
+    random_state (int or None): Seed for random state.
+
+    Returns:
+    tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
+    """
+    # Size of the dataset
+    num_samples = X.shape[0]
+
+    if isinstance(test_size, float):
+        test_size = int(test_size * num_samples)
+    elif isinstance(test_size, int):
+        if test_size > num_samples:
+            raise ValueError(
+                "test_size cannot be larger than the number of samples.")
+    else:
+        raise ValueError("test_size should be float or int.")
+
+    if random_state is not None:
+        np.random.seed(random_state)
+
+    indices = np.arange(num_samples)
+    np.random.shuffle(indices)
+
+    test_indices = indices[:test_size]
+    train_indices = indices[test_size:]
+
+    x_train, x_test = X[train_indices], X[test_indices]
+    y_train, y_test = y[train_indices], y[test_indices]
+
+    return x_train, x_test, y_train, y_test
+
+
+def metrics(y_ts, test_preds):
+    """
+    Calculates precision, recall and F1 score for a classification task.
+
+    Args:
+    y_test (list or numpy.ndarray): True labels.
+    test_preds (list or numpy.ndarray): Predicted labels.
+
+    Returns:
+    tuple: Precision, recall, F1 score.
+    """
+    y_test_d = decode_one_hot(y_ts)
+    y_test_d = np.array(y_test_d)
+    y_pred = np.array(test_preds)
+
+    if y_test_d.ndim > 1:
+        y_test_d = y_test_d.reshape(-1)
+    if y_pred.ndim > 1:
+        y_pred = y_pred.reshape(-1)
+
+    tp = {}
+    fp = {}
+    fn = {}
+
+    classes = np.unique(np.concatenate((y_test_d, y_pred)))
+
+    for c in classes:
+        tp[c] = 0
+        fp[c] = 0
+        fn[c] = 0
+
+    for c in classes:
+        for true, pred in zip(y_test_d, y_pred):
+            if true == c and pred == c:
+                tp[c] += 1
+            elif true != c and pred == c:
+                fp[c] += 1
+            elif true == c and pred != c:
+                fn[c] += 1
+
+    precision = {}
+    recall = {}
+    f1 = {}
+
+    for c in classes:
+        precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+        recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+        f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] +
+                                                  recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+    micro_precision = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(
+        list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
+    micro_recall = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(
+        fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
+    micro_f1 = 2 * (micro_precision * micro_recall) / (micro_precision +
+                                                       micro_recall) if (micro_precision + micro_recall) > 0 else 0
+
+    return micro_precision, micro_recall, micro_f1
+
+
+def decode_one_hot(encoded_data):
+    """
+    Decodes one-hot encoded data to original categorical labels.
+
+    Args:
+    encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
+
+    Returns:
+    numpy.ndarray: Decoded categorical labels with shape (n_samples,).
+    """
+
+    decoded_labels = np.argmax(encoded_data, axis=1)
+
+    return decoded_labels
+
+
+def roc_curve(y_true, y_score):
+    """
+    Computes ROC curve.
+
+    Args:
+    y_true (numpy.ndarray): True class labels (binary: 0 or 1).
+    y_score (numpy.ndarray): Predicted probabilities for positive class.
+
+    Returns:
+    tuple: FPR (False Positive Rate), TPR (True Positive Rate), thresholds.
+    """
+
+    idx = np.argsort(y_score)[::-1]
+    y_score_sorted = y_score[idx]
+    y_true_sorted = y_true[idx]
+
+    tpr = []
+    fpr = []
+    thresholds = np.linspace(0, 1, 100)
+
+    for threshold in thresholds:
+        y_pred_binary = np.where(y_score_sorted >= threshold, 1, 0)
+
+        tp = np.sum((y_true_sorted == 1) & (y_pred_binary == 1))
+        fn = np.sum((y_true_sorted == 1) & (y_pred_binary == 0))
+        tn = np.sum((y_true_sorted == 0) & (y_pred_binary == 0))
+        fp = np.sum((y_true_sorted == 0) & (y_pred_binary == 1))
+
+        # Check for division by zero
+        if (tp + fn) == 0:
+            tpr_value = 0.0
+        else:
+            tpr_value = tp / (tp + fn)
+
+        if (fp + tn) == 0:
+            fpr_value = 0.0
+        else:
+            fpr_value = fp / (fp + tn)
+
+        tpr.append(tpr_value)
+        fpr.append(fpr_value)
+
+    return fpr, tpr, thresholds
+
+
+def confusion_matrix(y_true, y_pred, class_count):
+    """
+    Computes confusion matrix.
+
+    Args:
+    y_true (numpy.ndarray): True class labels (1D array).
+    y_pred (numpy.ndarray): Predicted class labels (1D array).
+    num_classes (int): Number of classes.
+
+    Returns:
+    numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
+    """
+    confusion = np.zeros((class_count, class_count), dtype=int)
+
+    for i in range(len(y_true)):
+        true_label = y_true[i]
+        pred_label = y_pred[i]
+        confusion[true_label, pred_label] += 1
+
+    return confusion
+
+
+def plot_evaluate(y_test, y_preds, acc_list):
+
+    acc = acc_list[len(acc_list) - 1]
+    y_true = decode_one_hot(y_test)
+
+    y_true = np.array(y_true)
+    y_preds = np.array(y_preds)
+    fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+    precision, recall, f1 = metrics(y_test, y_preds)
+    Class = np.unique(y_test)
+
+    # Confusion matrix
+    cm = confusion_matrix(y_true, y_preds, len(Class))
+    # Arrange the panels inside one subplot grid
+    fig, axs = plt.subplots(2, 2, figsize=(16, 12))
+
+    # Confusion Matrix
+    sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
+    axs[0, 0].set_title("Confusion Matrix")
+    axs[0, 0].set_xlabel("Predicted Class")
+    axs[0, 0].set_ylabel("Actual Class")
+
+    # ROC Curve
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
+    axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+    axs[1, 0].set_xlim([0.0, 1.0])
+    axs[1, 0].set_ylim([0.0, 1.05])
+    axs[1, 0].set_xlabel('False Positive Rate')
+    axs[1, 0].set_ylabel('True Positive Rate')
+    axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
+    axs[1, 0].legend(loc="lower right")
+
+    # Bar chart of precision, recall, F1 score and accuracy
+    metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
+    values = [precision, recall, f1, acc]
+    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
+
+    # Create the bar chart
+    bars = axs[0, 1].bar(metric, values, color=colors)
+
+    # Write each value on top of its bar
+    for bar, value in zip(bars, values):
+        axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
+                       ha='center', va='bottom', fontsize=12, color='white', weight='bold')
+
+    axs[0, 1].set_ylim(0, 1)  # Limit the y-axis to the range 0-1
+    axs[0, 1].set_xlabel('Metrics')
+    axs[0, 1].set_ylabel('Score')
+    axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy')
+    axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
+
+    # Accuracy
+    plt.plot(acc_list, marker='o', linestyle='-',
+             color='r', label='Accuracy')
+
+    # Horizontal line marking the maximum possible accuracy
+    plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
+
+    # Axis and title settings
+    plt.xlabel('Samples')
+    plt.ylabel('Accuracy')
+    plt.title('Accuracy History')
+    plt.legend()
+
+    # Finalize the subplot layout
+    plt.tight_layout()
+    plt.show()
 
-
 def get_weights():
-
+
     return 0
-
+
+
 def get_df():
-
-    return 1
-
+
+    return 2
+
+
 def get_preds():
-
+
     return 1
-
+
+
 def get_acc():
-
-    return 2
 
-def get_pot():
-
-    return 1
+    return 2
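
To close, an end-to-end sketch (ours; random toy data) wiring together the helpers this release adds — split, encode_one_hot, standard_scaler — with the existing train/test entry points:

    import numpy as np

    X = np.random.rand(60, 4)             # 60 samples, 4 features
    y = np.random.randint(0, 3, 60)       # 3 integer class labels

    x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
    y_train, y_test = encode_one_hot(y_train, y_test)
    x_train, x_test = standard_scaler(x_train, x_test)

    trained_W = fit(list(x_train), y_train.tolist())
    W, y_preds, acc = evaluate(list(x_test), y_test, False, trained_W)
    precision, recall, f1 = metrics(y_test, y_preds)
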