pyerualjetwork 2.6.9__py3-none-any.whl → 2.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -5,14 +5,6 @@ Created on Tue Jun 18 23:32:16 2024
5
5
  @author: hasan
6
6
  """
7
7
 
8
-
9
- # -*- coding: utf-8 -*-
10
- """
11
- Created on Tue Jun 18 23:32:16 2024
12
-
13
- @author: hasan
14
- """
15
-
16
8
  import pandas as pd
17
9
  import numpy as np
18
10
  import time
@@ -30,12 +22,13 @@ from tqdm import tqdm
30
22
  def fit(
31
23
  x_train: List[Union[int, float]],
32
24
  y_train: List[Union[int, float]], # At least two.. and one hot encoded
33
- show_training,
34
- show_count= None,
35
25
  val= None,
26
+ val_count = None,
27
+ activation_potentiation=None, # (float): Input activation_potentiation (optional)
36
28
  x_val= None,
37
29
  y_val= None,
38
- activation_potentiation=None # (float): Input activation_potentiation (optional)
30
+ show_training = None,
31
+ show_count= None
39
32
  ) -> str:
40
33
 
41
34
  infoPLAN = """
@@ -44,33 +37,53 @@ def fit(
44
37
  Args:
45
38
  x_train (list[num]): List of input data.
46
39
  y_train (list[num]): List of target labels. (one hot encoded)
47
- show_training (bool, str): True, None or'final'
48
- show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
49
- val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None
40
+ val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
50
41
  val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
51
- x_val (list[num]): List of validation data. (optional) Default: x_train
52
- y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
53
42
  activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
43
+ x_val (list[num]): List of validation data. (optional) Default: 10% of x_train (auto_balanced)
44
+ y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 10% of y_train (auto_balanced)
45
+ show_training (bool, str): True, None or'final'
46
+ show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
54
47
  Returns:
55
48
  list([num]): (Weight matrices list, train_predictions list, Train_acc).
56
49
  error handled ?: Process status ('e')
57
50
  """
58
-
59
51
 
60
52
  if len(x_train) != len(y_train):
61
53
 
62
- print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
54
+ print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
63
55
  return 'e'
64
-
65
- if x_val == None and y_val == None:
66
-
67
- x_val = x_train
68
- y_val = y_train
69
56
 
70
57
  if val == True or val == 'final':
58
+
59
+ try:
60
+
61
+ if x_val == None and y_val == None:
62
+
63
+ x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
64
+
65
+ x_train, y_train = auto_balancer(x_train, y_train)
66
+ x_val, y_val = auto_balancer(x_val, y_val)
67
+
68
+ except:
69
+ pass
70
+
71
+ if val == True:
72
+
73
+ if val_count == None:
74
+
75
+ val_count = 0.1
76
+
77
+ v_iter = 0
78
+
79
+ if val == 'final':
80
+
81
+ val_count = 0.99
71
82
 
72
- val_bar = tqdm(total=1, desc="Training / Validating Accuracy", ncols=120)
73
-
83
+ val_count = int(len(x_train) * val_count)
84
+ val_count_copy = val_count
85
+ val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
86
+ val_list = [] * val_count
74
87
 
75
88
  if show_count == None:
76
89
 
@@ -92,7 +105,6 @@ def fit(
92
105
 
93
106
  neurons = [len(class_count), len(class_count)]
94
107
  layers = ['fex']
95
- val_list = []
96
108
 
97
109
  x_train[0] = np.array(x_train[0])
98
110
  x_train[0] = x_train[0].ravel()
@@ -105,8 +117,10 @@ def fit(
105
117
 
106
118
  y = decode_one_hot(y_train)
107
119
 
120
+ train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
121
+
108
122
  for index, inp in enumerate(x_train):
109
-
123
+
110
124
  progress = index / len(x_train) * 100
111
125
 
112
126
  inp = np.array(inp)
@@ -130,31 +144,28 @@ def fit(
130
144
  LTPW[i] = LTPW[i] + w
131
145
 
132
146
 
133
- if val == True:
147
+ if val == True and index == val_count:
134
148
 
135
- if show_training == True:
136
- try:
137
- plt.close(fig)
138
149
 
139
- except:
140
- pass
150
+ val_count += val_count_copy
141
151
 
142
- validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
152
+ validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, None)
143
153
 
144
154
  val_acc = validation_model[get_acc()]
145
155
 
146
156
  val_list.append(val_acc)
147
157
 
148
- if index == 0:
158
+ if v_iter == 0:
149
159
 
150
160
  val_bar.update(val_acc)
151
161
 
152
162
 
153
- if index != 0:
163
+ if v_iter != 0:
154
164
 
155
- val_acc = val_acc - val_list[index - 1]
165
+ val_acc = val_acc - val_list[v_iter - 1]
156
166
  val_bar.update(val_acc)
157
167
 
168
+ v_iter += 1
158
169
 
159
170
  if show_training == True:
160
171
 
@@ -199,17 +210,18 @@ def fit(
199
210
  title_info = 'Weight Matrix Of Fex Layer'
200
211
 
201
212
 
202
-
213
+
203
214
 
204
215
  progress_status = f"{progress:.1f}"
205
216
  fig.suptitle(suptitle_info + progress_status)
206
217
  plt.draw()
207
218
  plt.pause(0.1)
208
219
 
209
-
220
+
210
221
  STPW = weight_identification(
211
222
  len(layers) - 1, len(class_count), neurons, x_train_size)
212
223
 
224
+ train_progress.update(1)
213
225
 
214
226
  if show_training == 'final':
215
227
 
@@ -240,9 +252,8 @@ def fit(
240
252
 
241
253
  val_list.append(val_acc)
242
254
 
243
- val_bar.update(val_acc)
244
-
245
-
255
+ val_bar.update(val_acc)
256
+
246
257
  return LTPW
247
258
 
248
259
  # FUNCTIONS -----
@@ -456,10 +467,10 @@ def Relu(
456
467
  def evaluate(
457
468
  x_test, # list[num]: Test input data.
458
469
  y_test, # list[num]: Test labels.
459
- show_metrices, # show_metrices (bool): (True or False)
460
470
  W, # list[num]: Weight matrix list of the neural network.
461
- activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional)
462
- acc_bar_status = True
471
+ activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
472
+ acc_bar_status=True, # acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
473
+ show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
463
474
  ) -> tuple:
464
475
  infoTestModel = """
465
476
  Tests the neural network model with the given test data.
@@ -467,10 +478,10 @@ def evaluate(
467
478
  Args:
468
479
  x_test (list[num]): Test input data.
469
480
  y_test (list[num]): Test labels.
470
- show_metrices (bool): (True or False)
471
481
  W (list[num]): Weight matrix list of the neural network.
472
- activation_potentiation (float or None): Threshold value for comparison. (optional)
473
- acc_bar_status (bool): Loading bar for accuracy (True or None)
482
+ activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
483
+ acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
484
+ show_metrices (bool): (True or None) (optional) Default: None
474
485
 
475
486
  Returns:
476
487
  tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -486,25 +497,27 @@ def evaluate(
486
497
 
487
498
  for i, w in enumerate(W):
488
499
  Wc[i] = np.copy(w)
489
- print('\rCopying weights.....', i+1, '/', len(W), end="")
490
500
 
491
- # print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
501
+
492
502
  if acc_bar_status == True:
493
- acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=75)
503
+
504
+ test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
505
+ acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
506
+
507
+
494
508
  for inpIndex, Input in enumerate(x_test):
495
509
  Input = np.array(Input)
496
510
  Input = Input.ravel()
497
- uni_start_time = time.time()
498
511
  neural_layer = Input
499
-
512
+
500
513
  for index, Layer in enumerate(layers):
501
-
514
+
502
515
  neural_layer = normalization(neural_layer)
503
-
516
+
504
517
  if Layer == 'fex':
505
518
  neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
506
-
507
-
519
+
520
+
508
521
  for i, w in enumerate(Wc):
509
522
  W[i] = np.copy(w)
510
523
  RealOutput = np.argmax(y_test[inpIndex])
@@ -512,20 +525,21 @@ def evaluate(
512
525
  if RealOutput == PredictedOutput:
513
526
  true += 1
514
527
  acc = true / len(y_test)
515
-
516
-
528
+
529
+
517
530
  acc_list.append(acc)
518
531
  y_preds[inpIndex] = PredictedOutput
519
532
 
520
533
  if acc_bar_status == True:
534
+ test_progress.update(1)
521
535
  if inpIndex == 0:
522
536
  acc_bar.update(acc)
523
537
 
524
538
  else:
525
539
  acc = acc - acc_list[inpIndex - 1]
526
540
  acc_bar.update(acc)
527
-
528
- if show_metrices == True:
541
+
542
+ if show_metrices == True:
529
543
  plot_evaluate(y_test, y_preds, acc_list)
530
544
 
531
545
 
@@ -537,7 +551,7 @@ def evaluate(
537
551
  print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
538
552
  infoTestModel + Style.RESET_ALL)
539
553
  return 'e'
540
-
554
+
541
555
  return W, y_preds, acc
542
556
 
543
557
 
@@ -909,7 +923,7 @@ def predict_model_ssd(Input, model_name, model_path):
909
923
 
910
924
  except:
911
925
 
912
- non_scaled = True
926
+ pass
913
927
 
914
928
 
915
929
  layers = ['fex']
@@ -938,7 +952,7 @@ def predict_model_ssd(Input, model_name, model_path):
938
952
  return neural_layer
939
953
 
940
954
 
941
- def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
955
+ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):
942
956
 
943
957
  infopredict_model_ram = """
944
958
  Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
@@ -946,17 +960,19 @@ def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
946
960
 
947
961
  Arguments:
948
962
  Input (list or ndarray): Input data for the model (single vector or single matrix).
949
- scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
950
963
  W (list of ndarrays): Weights of the model.
951
- activation_potentiation (float or None): Threshold value for comparison. (optional)
964
+ scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
965
+ activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
952
966
 
953
967
  Returns:
954
968
  ndarray: Output from the model.
955
969
  """
956
-
957
- if scaler_params != None:
970
+ try:
971
+ if scaler_params != None:
958
972
 
959
- Input = standard_scaler(None, Input, scaler_params)
973
+ Input = standard_scaler(None, Input, scaler_params)
974
+ except:
975
+ Input = standard_scaler(None, Input, scaler_params)
960
976
 
961
977
  layers = ['fex']
962
978
 
@@ -1012,7 +1028,7 @@ def auto_balancer(x_train, y_train):
1012
1028
  MinCount = min(classes)
1013
1029
 
1014
1030
  BalancedIndices = []
1015
- for i in range(class_count):
1031
+ for i in tqdm(range(class_count),leave=False,desc='Balancing Data',ncols=120):
1016
1032
  if len(ClassIndices[i]) > MinCount:
1017
1033
  SelectedIndices = np.random.choice(
1018
1034
  ClassIndices[i], MinCount, replace=False)
@@ -1029,7 +1045,7 @@ def auto_balancer(x_train, y_train):
1029
1045
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
1030
1046
  return 'e'
1031
1047
 
1032
- return BalancedInputs, BalancedLabels
1048
+ return np.array(BalancedInputs), np.array(BalancedLabels)
1033
1049
 
1034
1050
 
1035
1051
  def synthetic_augmentation(x_train, y_train):
@@ -1059,7 +1075,7 @@ def synthetic_augmentation(x_train, y_train):
1059
1075
  x_balanced = list(x)
1060
1076
  y_balanced = list(y)
1061
1077
 
1062
- for class_label in range(class_count):
1078
+ for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data',ncols= 120):
1063
1079
  class_indices = [i for i, label in enumerate(
1064
1080
  y) if np.argmax(label) == class_label]
1065
1081
  num_samples = len(class_indices)
@@ -1099,6 +1115,15 @@ def standard_scaler(x_train, x_test, scaler_params=None):
1099
1115
  tuple
1100
1116
  Standardized training and test datasets
1101
1117
  """
1118
+ try:
1119
+
1120
+ x_train = x_train.tolist()
1121
+ x_test = x_test.tolist()
1122
+
1123
+ except:
1124
+
1125
+ pass
1126
+
1102
1127
  try:
1103
1128
 
1104
1129
  if scaler_params == None and x_test != None:
@@ -1182,7 +1207,6 @@ def split(X, y, test_size, random_state):
1182
1207
  Returns:
1183
1208
  tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
1184
1209
  """
1185
- # Size of the dataset
1186
1210
  num_samples = X.shape[0]
1187
1211
 
1188
1212
  if isinstance(test_size, float):
@@ -1399,7 +1423,6 @@ def plot_evaluate(y_test, y_preds, acc_list):
1399
1423
 
1400
1424
  # Confusion matrix
1401
1425
  cm = confusion_matrix(y_true, y_preds, len(Class))
1402
- # Subplot içinde düzenleme
1403
1426
  fig, axs = plt.subplots(2, 2, figsize=(16, 12))
1404
1427
 
1405
1428
  # Confusion Matrix
@@ -1518,16 +1541,19 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
1518
1541
  x_balanced -- Balanced input dataset (NumPy array format)
1519
1542
  y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
1520
1543
  """
1521
- start_time = time.time()
1522
- x_train = np.array(x_train)
1523
- y_train = np.array(y_train)
1544
+ try:
1545
+ x_train = np.array(x_train)
1546
+ y_train = np.array(y_train)
1547
+ except:
1548
+ print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
1549
+ pass
1524
1550
  classes = np.arange(y_train.shape[1])
1525
1551
  class_count = len(classes)
1526
1552
 
1527
1553
  x_balanced = []
1528
1554
  y_balanced = []
1529
1555
 
1530
- for class_label in range(class_count):
1556
+ for class_label in tqdm(range(class_count),leave=False, desc='Augmenting Data',ncols= 120):
1531
1557
  class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
1532
1558
  num_samples = len(class_indices)
1533
1559
 
@@ -1549,8 +1575,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
1549
1575
  additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
1550
1576
 
1551
1577
  for i in range(samples_to_add):
1552
- uni_start_time = time.time()
1553
-
1578
+
1554
1579
  random_indices = np.random.choice(class_indices, 2, replace=False)
1555
1580
  sample1 = x_train[random_indices[0]]
1556
1581
  sample2 = x_train[random_indices[1]]
@@ -1561,41 +1586,1627 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
1561
1586
  additional_samples[i] = synthetic_sample
1562
1587
  additional_labels[i] = y_train[class_indices[0]]
1563
1588
 
1564
- uni_end_time = time.time()
1565
- calculating_est = round(
1566
- (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
1567
-
1568
- if calculating_est < 60:
1569
- print('\rest......(sec):', calculating_est, '\n', end="")
1570
-
1571
- elif calculating_est > 60 and calculating_est < 3600:
1572
- print('\rest......(min):', calculating_est/60, '\n', end="")
1573
-
1574
- elif calculating_est > 3600:
1575
- print('\rest......(h):', calculating_est/3600, '\n', end="")
1576
-
1577
- print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
1578
-
1589
+
1579
1590
  x_balanced.append(additional_samples)
1580
1591
  y_balanced.append(additional_labels)
1581
-
1582
-
1583
- EndTime = time.time()
1584
-
1585
- calculating_est = round(EndTime - start_time, 2)
1586
-
1587
- print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
1588
-
1589
- if calculating_est < 60:
1590
- print('Total balancing time(sec): ', calculating_est)
1591
-
1592
- elif calculating_est > 60 and calculating_est < 3600:
1593
- print('Total balancing time(min): ', calculating_est/60)
1594
-
1595
- elif calculating_est > 3600:
1596
- print('Total balancing time(h): ', calculating_est/3600)
1597
-
1598
- # Stack the balanced arrays
1592
+
1593
+ x_balanced = np.vstack(x_balanced)
1594
+ y_balanced = np.vstack(y_balanced)
1595
+
1596
+ return x_balanced, y_balanced
1597
+
1598
+ def get_weights():
1599
+
1600
+ return 0
1601
+
1602
+
1603
+ def get_df():
1604
+
1605
+ return 2
1606
+
1607
+
1608
+ def get_preds():
1609
+
1610
+ return 1
1611
+
1612
+
1613
+ def get_acc():
1614
+
1615
+ return 2
1616
+
1617
+ # -*- coding: utf-8 -*-
1618
+ """
1619
+ Created on Tue Jun 18 23:32:16 2024
1620
+
1621
+ @author: hasan
1622
+ """
1623
+
1624
+ import pandas as pd
1625
+ import numpy as np
1626
+ import time
1627
+ from colorama import Fore, Style
1628
+ from typing import List, Union
1629
+ import math
1630
+ from scipy.special import expit, softmax
1631
+ import matplotlib.pyplot as plt
1632
+ import seaborn as sns
1633
+ from tqdm import tqdm
1634
+
1635
+ # BUILD -----
1636
+
1637
+
1638
+ def fit(
1639
+ x_train: List[Union[int, float]],
1640
+ y_train: List[Union[int, float]], # At least two.. and one hot encoded
1641
+ val= None,
1642
+ val_count = None,
1643
+ activation_potentiation=None, # (float): Input activation_potentiation (optional)
1644
+ x_val= None,
1645
+ y_val= None,
1646
+ show_training = None,
1647
+ show_count= None
1648
+ ) -> str:
1649
+
1650
+ infoPLAN = """
1651
+ Creates and configures a PLAN model.
1652
+
1653
+ Args:
1654
+ x_train (list[num]): List of input data.
1655
+ y_train (list[num]): List of target labels. (one hot encoded)
1656
+ val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
1657
+ val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
1658
+ activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
1659
+ x_val (list[num]): List of validation data. (optional) Default: 10% of x_train (auto_balanced)
1660
+ y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 10% of y_train (auto_balanced)
1661
+ show_training (bool, str): True, None or'final'
1662
+ show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
1663
+ Returns:
1664
+ list([num]): (Weight matrices list, train_predictions list, Train_acc).
1665
+ error handled ?: Process status ('e')
1666
+ """
1667
+
1668
+ if len(x_train) != len(y_train):
1669
+
1670
+ print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
1671
+ return 'e'
1672
+
1673
+ if val == True or val == 'final':
1674
+
1675
+ try:
1676
+
1677
+ if x_val == None and y_val == None:
1678
+
1679
+ x_train, x_val, y_train, y_val = split(x_train, y_train,test_size=0.1,random_state=42)
1680
+
1681
+ x_train, y_train = auto_balancer(x_train, y_train)
1682
+ x_val, y_val = auto_balancer(x_val, y_val)
1683
+
1684
+ y_train, y_val = encode_one_hot(y_train, y_val)
1685
+
1686
+ except:
1687
+ pass
1688
+
1689
+ if val == True:
1690
+
1691
+ if val_count == None:
1692
+
1693
+ val_count = 0.1
1694
+
1695
+ v_iter = 0
1696
+
1697
+ if val == 'final':
1698
+
1699
+ val_count = 0.99
1700
+
1701
+ val_count = int(len(x_train) * val_count)
1702
+ val_count_copy = val_count
1703
+ val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
1704
+ val_list = [] * val_count
1705
+
1706
+ if show_count == None:
1707
+
1708
+ show_count = 10
1709
+
1710
+ if show_training == True or show_training == 'final':
1711
+
1712
+ row, col = shape_control(x_train)
1713
+
1714
+ class_count = set()
1715
+
1716
+ for sublist in y_train:
1717
+
1718
+ class_count.add(tuple(sublist))
1719
+
1720
+ class_count = list(class_count)
1721
+
1722
+ y_train = [tuple(sublist) for sublist in y_train]
1723
+
1724
+ neurons = [len(class_count), len(class_count)]
1725
+ layers = ['fex']
1726
+
1727
+ x_train[0] = np.array(x_train[0])
1728
+ x_train[0] = x_train[0].ravel()
1729
+ x_train_size = len(x_train[0])
1730
+
1731
+ STPW = weight_identification(
1732
+ len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
1733
+
1734
+ LTPW = [1] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT
1735
+
1736
+ y = decode_one_hot(y_train)
1737
+
1738
+ train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
1739
+
1740
+ for index, inp in enumerate(x_train):
1741
+
1742
+ progress = index / len(x_train) * 100
1743
+
1744
+ inp = np.array(inp)
1745
+ inp = inp.ravel()
1746
+
1747
+ if x_train_size != len(inp):
1748
+ print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
1749
+ infoPLAN + Style.RESET_ALL)
1750
+ return 'e'
1751
+
1752
+ neural_layer = inp
1753
+
1754
+ for Lindex, Layer in enumerate(layers):
1755
+
1756
+ neural_layer = normalization(neural_layer)
1757
+
1758
+ if Layer == 'fex':
1759
+ STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)
1760
+
1761
+ for i, w in enumerate(STPW):
1762
+ LTPW[i] = LTPW[i] + w
1763
+
1764
+
1765
+ if val == True and index == val_count:
1766
+
1767
+
1768
+ val_count += val_count_copy
1769
+
1770
+ validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, None)
1771
+
1772
+ val_acc = validation_model[get_acc()]
1773
+
1774
+ val_list.append(val_acc)
1775
+
1776
+ if v_iter == 0:
1777
+
1778
+ val_bar.update(val_acc)
1779
+
1780
+
1781
+ if v_iter != 0:
1782
+
1783
+ val_acc = val_acc - val_list[v_iter - 1]
1784
+ val_bar.update(val_acc)
1785
+
1786
+ v_iter += 1
1787
+
1788
+ if show_training == True:
1789
+
1790
+ if index %show_count == 0:
1791
+
1792
+
1793
+ if index != 0:
1794
+ plt.close(fig)
1795
+
1796
+ if row != 0:
1797
+
1798
+ fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
1799
+
1800
+ else:
1801
+
1802
+ fig, ax = plt.subplots(1, 1, figsize=(18, 14))
1803
+
1804
+ for j in range(len(class_count)):
1805
+
1806
+
1807
+ if row != 0:
1808
+
1809
+ mat = LTPW[0][j,:].reshape(row, col)
1810
+ suptitle_info = 'Neurons Learning Progress: % '
1811
+ title_info = f'{j+1}. Neuron'
1812
+
1813
+ mat = LTPW[0][j,:].reshape(row, col)
1814
+
1815
+ ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
1816
+
1817
+ ax[j].set_aspect('equal')
1818
+
1819
+ ax[j].set_xticks([])
1820
+ ax[j].set_yticks([])
1821
+ ax[j].set_title(title_info)
1822
+
1823
+ else:
1824
+
1825
+ mat = LTPW[0]
1826
+ ax.imshow(mat, interpolation='sinc', cmap='viridis')
1827
+ suptitle_info = 'Weight Learning Progress: % '
1828
+ title_info = 'Weight Matrix Of Fex Layer'
1829
+
1830
+
1831
+
1832
+
1833
+ progress_status = f"{progress:.1f}"
1834
+ fig.suptitle(suptitle_info + progress_status)
1835
+ plt.draw()
1836
+ plt.pause(0.1)
1837
+
1838
+
1839
+ STPW = weight_identification(
1840
+ len(layers) - 1, len(class_count), neurons, x_train_size)
1841
+
1842
+ train_progress.update(1)
1843
+
1844
+ if show_training == 'final':
1845
+
1846
+ fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
1847
+
1848
+ for j in range(len(class_count)):
1849
+
1850
+ mat = LTPW[0][j,:].reshape(row, col)
1851
+
1852
+ ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
1853
+ ax[j].set_aspect('equal')
1854
+
1855
+ ax[j].set_xticks([])
1856
+ ax[j].set_yticks([])
1857
+ ax[j].set_title(f'{j+1}. Neuron')
1858
+
1859
+ progress_status = f"{progress:.1f}"
1860
+ fig.suptitle('Neurons Learning Progress: % ' + progress_status)
1861
+ plt.draw()
1862
+ plt.pause(0.1)
1863
+
1864
+
1865
+ if val == 'final':
1866
+
1867
+ validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
1868
+
1869
+ val_acc = validation_model[get_acc()]
1870
+
1871
+ val_list.append(val_acc)
1872
+
1873
+ val_bar.update(val_acc)
1874
+
1875
+ return LTPW
1876
+
1877
+ # FUNCTIONS -----
1878
+
1879
+ def shape_control(x_train):
1880
+
1881
+ try:
1882
+ row = x_train[1].shape[0]
1883
+ col = x_train[1].shape[1]
1884
+
1885
+ except:
1886
+
1887
+ print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
1888
+
1889
+ try:
1890
+ row, col = find_numbers(len(x_train[0]))
1891
+
1892
+ except:
1893
+
1894
+ print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
1895
+ return [0, 0]
1896
+
1897
+ return row, col
1898
+
1899
+ def find_numbers(n):
1900
+ if n <= 1:
1901
+ raise ValueError("Parameter 'n' must be greater than 1.")
1902
+
1903
+ for i in range(2, int(n**0.5) + 1):
1904
+ if n % i == 0:
1905
+ factor1 = i
1906
+ factor2 = n // i
1907
+ if factor1 == factor2:
1908
+ return factor1, factor2
1909
+
1910
+ return None
1911
+
1912
+ def weight_normalization(
1913
+ W,
1914
+ class_count
1915
+ ) -> str:
1916
+ """
1917
+ Row(Neuron) based normalization. For unbalanced models.
1918
+
1919
+ Args:
1920
+ W (list(num)): Trained weight matrix list.
1921
+ class_count (int): Class count of model.
1922
+
1923
+ Returns:
1924
+ list([numpy_arrays],[...]): posttrained weight matices of the model. .
1925
+ """
1926
+
1927
+ for i in range(class_count):
1928
+
1929
+ W[0][i,:] = normalization(W[0][i,:])
1930
+
1931
+ return W
1932
+
1933
+ def weight_identification(
1934
+ layer_count, # int: Number of layers in the neural network.
1935
+ class_count, # int: Number of classes in the classification task.
1936
+ neurons, # list[num]: List of neuron counts for each layer.
1937
+ x_train_size # int: Size of the input data.
1938
+ ) -> str:
1939
+ """
1940
+ Identifies the weights for a neural network model.
1941
+
1942
+ Args:
1943
+ layer_count (int): Number of layers in the neural network.
1944
+ class_count (int): Number of classes in the classification task.
1945
+ neurons (list[num]): List of neuron counts for each layer.
1946
+ x_train_size (int): Size of the input data.
1947
+
1948
+ Returns:
1949
+ list([numpy_arrays],[...]): pretrained weight matices of the model. .
1950
+ """
1951
+
1952
+ Wlen = layer_count + 1
1953
+ W = [None] * Wlen
1954
+ W[0] = np.ones((neurons[0], x_train_size))
1955
+ ws = layer_count - 1
1956
+ for w in range(ws):
1957
+ W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
1958
+
1959
+ return W
1960
+
1961
+
1962
+ def fex(
1963
+ Input, # list[num]: Input data.
1964
+ w, # num: Weight matrix of the neural network.
1965
+ is_training, # bool: Flag indicating if the function is called during training (True or False).
1966
+ Class, # int: Which class is, if training.
1967
+ activation_potentiation # float or None: Input activation potentiation (optional)
1968
+ ) -> tuple:
1969
+ """
1970
+ Applies feature extraction process to the input data using synaptic potentiation.
1971
+
1972
+ Args:
1973
+ Input (num): Input data.
1974
+ w (num): Weight matrix of the neural network.
1975
+ is_training (bool): Flag indicating if the function is called during training (True or False).
1976
+ Class (int): if is during training then which class(label) ? is isnt then put None.
1977
+ activation_potentiation (float or None): Threshold value for comparison. (optional)
1978
+
1979
+ Returns:
1980
+ tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
1981
+ """
1982
+
1983
+ if is_training == True and activation_potentiation == None:
1984
+
1985
+ w[Class, :] = Input
1986
+
1987
+ return w
1988
+
1989
+ elif is_training == True and activation_potentiation != None:
1990
+
1991
+
1992
+ Input[Input < activation_potentiation] = 0
1993
+ Input[Input > activation_potentiation] = 1
1994
+
1995
+ w[Class,:] = Input
1996
+
1997
+ return w
1998
+
1999
+ elif is_training == False and activation_potentiation == None:
2000
+
2001
+ neural_layer = np.dot(w, Input)
2002
+
2003
+ return neural_layer
2004
+
2005
+ elif is_training == False and activation_potentiation != None:
2006
+
2007
+ Input[Input < activation_potentiation] = 0
2008
+ Input[Input > activation_potentiation] = 1
2009
+
2010
+ neural_layer = np.dot(w, Input)
2011
+
2012
+ return neural_layer
2013
+
2014
+
2015
+
2016
+ def normalization(
2017
+ Input # num: Input data to be normalized.
2018
+ ):
2019
+ """
2020
+ Normalizes the input data using maximum absolute scaling.
2021
+
2022
+ Args:
2023
+ Input (num): Input data to be normalized.
2024
+
2025
+ Returns:
2026
+ (num) Scaled input data after normalization.
2027
+ """
2028
+
2029
+ AbsVector = np.abs(Input)
2030
+
2031
+ MaxAbs = np.max(AbsVector)
2032
+
2033
+ ScaledInput = Input / MaxAbs
2034
+
2035
+ return ScaledInput
2036
+
2037
+
2038
+ def Softmax(
2039
+ x # num: Input data to be transformed using softmax function.
2040
+ ):
2041
+ """
2042
+ Applies the softmax function to the input data.
2043
+
2044
+ Args:
2045
+ (num): Input data to be transformed using softmax function.
2046
+
2047
+ Returns:
2048
+ (num): Transformed data after applying softmax function.
2049
+ """
2050
+
2051
+ return softmax(x)
2052
+
2053
+
2054
+ def Sigmoid(
2055
+ x # num: Input data to be transformed using sigmoid function.
2056
+ ):
2057
+ """
2058
+ Applies the sigmoid function to the input data.
2059
+
2060
+ Args:
2061
+ (num): Input data to be transformed using sigmoid function.
2062
+
2063
+ Returns:
2064
+ (num): Transformed data after applying sigmoid function.
2065
+ """
2066
+ return expit(x)
2067
+
2068
+
2069
+ def Relu(
2070
+ x # num: Input data to be transformed using ReLU function.
2071
+ ):
2072
+ """
2073
+ Applies the Rectified Linear Unit (ReLU) function to the input data.
2074
+
2075
+ Args:
2076
+ (num): Input data to be transformed using ReLU function.
2077
+
2078
+ Returns:
2079
+ (num): Transformed data after applying ReLU function.
2080
+ """
2081
+
2082
+ return np.maximum(0, x)
2083
+
2084
+
2085
+ def evaluate(
2086
+ x_test, # list[num]: Test input data.
2087
+ y_test, # list[num]: Test labels.
2088
+ W, # list[num]: Weight matrix list of the neural network.
2089
+ activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
2090
+ acc_bar_status=True, # acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
2091
+ show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
2092
+ ) -> tuple:
2093
+ infoTestModel = """
2094
+ Tests the neural network model with the given test data.
2095
+
2096
+ Args:
2097
+ x_test (list[num]): Test input data.
2098
+ y_test (list[num]): Test labels.
2099
+ W (list[num]): Weight matrix list of the neural network.
2100
+ activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
2101
+ acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
2102
+ show_metrices (bool): (True or None) (optional) Default: None
2103
+
2104
+ Returns:
2105
+ tuple: A tuple containing the predicted labels and the accuracy of the model.
2106
+ """
2107
+
2108
+ layers = ['fex']
2109
+
2110
+ try:
2111
+ Wc = [0] * len(W) # Wc = Weight copy
2112
+ true = 0
2113
+ y_preds = [-1] * len(y_test)
2114
+ acc_list = []
2115
+
2116
+ for i, w in enumerate(W):
2117
+ Wc[i] = np.copy(w)
2118
+
2119
+
2120
+ if acc_bar_status == True:
2121
+
2122
+ test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
2123
+ acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
2124
+
2125
+
2126
+ for inpIndex, Input in enumerate(x_test):
2127
+ Input = np.array(Input)
2128
+ Input = Input.ravel()
2129
+ neural_layer = Input
2130
+
2131
+ for index, Layer in enumerate(layers):
2132
+
2133
+ neural_layer = normalization(neural_layer)
2134
+
2135
+ if Layer == 'fex':
2136
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
2137
+
2138
+
2139
+ for i, w in enumerate(Wc):
2140
+ W[i] = np.copy(w)
2141
+ RealOutput = np.argmax(y_test[inpIndex])
2142
+ PredictedOutput = np.argmax(neural_layer)
2143
+ if RealOutput == PredictedOutput:
2144
+ true += 1
2145
+ acc = true / len(y_test)
2146
+
2147
+
2148
+ acc_list.append(acc)
2149
+ y_preds[inpIndex] = PredictedOutput
2150
+
2151
+ if acc_bar_status == True:
2152
+ test_progress.update(1)
2153
+ if inpIndex == 0:
2154
+ acc_bar.update(acc)
2155
+
2156
+ else:
2157
+ acc = acc - acc_list[inpIndex - 1]
2158
+ acc_bar.update(acc)
2159
+
2160
+ if show_metrices == True:
2161
+ plot_evaluate(y_test, y_preds, acc_list)
2162
+
2163
+
2164
+ for i, w in enumerate(Wc):
2165
+ W[i] = np.copy(w)
2166
+
2167
+ except:
2168
+
2169
+ print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
2170
+ infoTestModel + Style.RESET_ALL)
2171
+ return 'e'
2172
+
2173
+ return W, y_preds, acc
2174
+
2175
+
2176
+ def multiple_evaluate(
2177
+ x_test, # list[num]: Test input data.
2178
+ y_test, # list[num]: Test labels.
2179
+ show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
2180
+ MW, # list[list[num]]: Weight matrix of the neural network.
2181
+ activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
2182
+ ) -> tuple:
2183
+ infoTestModel = """
2184
+ Tests the neural network model with the given test data.
2185
+
2186
+ Args:
2187
+ x_test (list[num]): Test input data.
2188
+ y_test (list[num]): Test labels.
2189
+ show_metrices (bool): (True or False)
2190
+ MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
2191
+
2192
+ Returns:
2193
+ tuple: A tuple containing the predicted labels and the accuracy of the model.
2194
+ """
2195
+
2196
+ layers = ['fex', 'cat']
2197
+
2198
+ try:
2199
+ y_preds = [-1] * len(y_test)
2200
+ acc_list = []
2201
+ print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
2202
+ start_time = time.time()
2203
+ true = 0
2204
+ for inpIndex, Input in enumerate(x_test):
2205
+
2206
+ output_layer = 0
2207
+
2208
+ for m, Model in enumerate(MW):
2209
+
2210
+ W = Model
2211
+
2212
+ Wc = [0] * len(W) # Wc = weight copy
2213
+
2214
+ y_preds = [None] * len(y_test)
2215
+ for i, w in enumerate(W):
2216
+ Wc[i] = np.copy(w)
2217
+
2218
+ Input = np.array(Input)
2219
+ Input = Input.ravel()
2220
+ uni_start_time = time.time()
2221
+ neural_layer = Input
2222
+
2223
+ for index, Layer in enumerate(layers):
2224
+
2225
+ neural_layer = normalization(neural_layer)
2226
+
2227
+ if Layer == 'fex':
2228
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
2229
+
2230
+ output_layer += neural_layer
2231
+
2232
+ for i, w in enumerate(Wc):
2233
+ W[i] = np.copy(w)
2234
+ for i, w in enumerate(Wc):
2235
+ W[i] = np.copy(w)
2236
+ RealOutput = np.argmax(y_test[inpIndex])
2237
+ PredictedOutput = np.argmax(output_layer)
2238
+ if RealOutput == PredictedOutput:
2239
+ true += 1
2240
+ acc = true / len(y_test)
2241
+ if show_metrices == True:
2242
+ acc_list.append(acc)
2243
+ y_preds[inpIndex] = PredictedOutput
2244
+
2245
+
2246
+ uni_end_time = time.time()
2247
+
2248
+ calculating_est = round(
2249
+ (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
2250
+
2251
+ if calculating_est < 60:
2252
+ print('\rest......(sec):', calculating_est, '\n', end="")
2253
+ print('\rTest accuracy: ', acc, "\n", end="")
2254
+
2255
+ elif calculating_est > 60 and calculating_est < 3600:
2256
+ print('\rest......(min):', calculating_est/60, '\n', end="")
2257
+ print('\rTest accuracy: ', acc, "\n", end="")
2258
+
2259
+ elif calculating_est > 3600:
2260
+ print('\rest......(h):', calculating_est/3600, '\n', end="")
2261
+ print('\rTest accuracy: ', acc, "\n", end="")
2262
+ if show_metrices == True:
2263
+ plot_evaluate(y_test, y_preds, acc_list)
2264
+
2265
+ EndTime = time.time()
2266
+ for i, w in enumerate(Wc):
2267
+ W[i] = np.copy(w)
2268
+
2269
+ calculating_est = round(EndTime - start_time, 2)
2270
+
2271
+ print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
2272
+
2273
+ if calculating_est < 60:
2274
+ print('Total testing time(sec): ', calculating_est)
2275
+
2276
+ elif calculating_est > 60 and calculating_est < 3600:
2277
+ print('Total testing time(min): ', calculating_est/60)
2278
+
2279
+ elif calculating_est > 3600:
2280
+ print('Total testing time(h): ', calculating_est/3600)
2281
+
2282
+ if acc >= 0.8:
2283
+ print(Fore.GREEN + '\nTotal Test accuracy: ',
2284
+ acc, '\n' + Style.RESET_ALL)
2285
+
2286
+ elif acc < 0.8 and acc > 0.6:
2287
+ print(Fore.MAGENTA + '\nTotal Test accuracy: ',
2288
+ acc, '\n' + Style.RESET_ALL)
2289
+
2290
+ elif acc <= 0.6:
2291
+ print(Fore.RED + '\nTotal Test accuracy: ',
2292
+ acc, '\n' + Style.RESET_ALL)
2293
+
2294
+ except:
2295
+
2296
+ print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
2297
+ return 'e'
2298
+
2299
+ return W, y_preds, acc
2300
+
2301
+
2302
+ def save_model(model_name,
2303
+ model_type,
2304
+ class_count,
2305
+ test_acc,
2306
+ weights_type,
2307
+ weights_format,
2308
+ model_path,
2309
+ scaler_params,
2310
+ W,
2311
+ activation_potentiation=None
2312
+ ):
2313
+
2314
+ infosave_model = """
2315
+ Function to save a potentiation learning model.
2316
+
2317
+ Arguments:
2318
+ model_name (str): Name of the model.
2319
+ model_type (str): Type of the model.(options: PLAN)
2320
+ class_count (int): Number of classes.
2321
+ test_acc (float): Test accuracy of the model.
2322
+ weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
2323
+ WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
2324
+ model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
2325
+ scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
2326
+ W: Weights of the model.
2327
+ activation_potentiation (float or None): Threshold value for comparison. (optional)
2328
+
2329
+ Returns:
2330
+ str: Message indicating if the model was saved successfully or encountered an error.
2331
+ """
2332
+
2333
+ # Operations to be performed by the function will be written here
2334
+ pass
2335
+
2336
+ layers = ['fex']
2337
+
2338
+ if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
2339
+ print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
2340
+ infosave_model + Style.RESET_ALL)
2341
+ return 'e'
2342
+
2343
+ if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
2344
+ print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
2345
+ infosave_model + Style.RESET_ALL)
2346
+ return 'e'
2347
+
2348
+ NeuronCount = 0
2349
+ SynapseCount = 0
2350
+
2351
+ try:
2352
+ for w in W:
2353
+ NeuronCount += np.shape(w)[0]
2354
+ SynapseCount += np.shape(w)[0] * np.shape(w)[1]
2355
+ except:
2356
+
2357
+ print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
2358
+ infosave_model + Style.RESET_ALL)
2359
+ return 'e'
2360
+ import pandas as pd
2361
+ from datetime import datetime
2362
+ from scipy import io
2363
+
2364
+ data = {'MODEL NAME': model_name,
2365
+ 'MODEL TYPE': model_type,
2366
+ 'LAYERS': layers,
2367
+ 'LAYER COUNT': len(layers),
2368
+ 'CLASS COUNT': class_count,
2369
+ 'NEURON COUNT': NeuronCount,
2370
+ 'SYNAPSE COUNT': SynapseCount,
2371
+ 'TEST ACCURACY': test_acc,
2372
+ 'SAVE DATE': datetime.now(),
2373
+ 'WEIGHTS TYPE': weights_type,
2374
+ 'WEIGHTS FORMAT': weights_format,
2375
+ 'MODEL PATH': model_path,
2376
+ 'STANDARD SCALER': scaler_params,
2377
+ 'ACTIVATION POTENTIATION': activation_potentiation
2378
+ }
2379
+ try:
2380
+
2381
+ df = pd.DataFrame(data)
2382
+
2383
+ df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
2384
+
2385
+ except:
2386
+
2387
+ print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
2388
+ infosave_model + Style.RESET_ALL)
2389
+ return 'e'
2390
+ try:
2391
+
2392
+ if weights_type == 'txt' and weights_format == 'd':
2393
+
2394
+ for i, w in enumerate(W):
2395
+ np.savetxt(model_path + model_name +
2396
+ str(i+1) + 'w.txt', w, fmt='%d')
2397
+
2398
+ if weights_type == 'txt' and weights_format == 'f':
2399
+
2400
+ for i, w in enumerate(W):
2401
+ np.savetxt(model_path + model_name +
2402
+ str(i+1) + 'w.txt', w, fmt='%f')
2403
+
2404
+ if weights_type == 'txt' and weights_format == 'raw':
2405
+
2406
+ for i, w in enumerate(W):
2407
+ np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
2408
+
2409
+ ###
2410
+
2411
+ if weights_type == 'npy' and weights_format == 'd':
2412
+
2413
+ for i, w in enumerate(W):
2414
+ np.save(model_path + model_name +
2415
+ str(i+1) + 'w.npy', w.astype(int))
2416
+
2417
+ if weights_type == 'npy' and weights_format == 'f':
2418
+
2419
+ for i, w in enumerate(W):
2420
+ np.save(model_path + model_name + str(i+1) +
2421
+ 'w.npy', w, w.astype(float))
2422
+
2423
+ if weights_type == 'npy' and weights_format == 'raw':
2424
+
2425
+ for i, w in enumerate(W):
2426
+ np.save(model_path + model_name + str(i+1) + 'w.npy', w)
2427
+
2428
+ ###
2429
+
2430
+ if weights_type == 'mat' and weights_format == 'd':
2431
+
2432
+ for i, w in enumerate(W):
2433
+ w = {'w': w.astype(int)}
2434
+ io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
2435
+
2436
+ if weights_type == 'mat' and weights_format == 'f':
2437
+
2438
+ for i, w in enumerate(W):
2439
+ w = {'w': w.astype(float)}
2440
+ io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
2441
+
2442
+ if weights_type == 'mat' and weights_format == 'raw':
2443
+
2444
+ for i, w in enumerate(W):
2445
+ w = {'w': w}
2446
+ io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
2447
+
2448
+ except:
2449
+
2450
+ print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
2451
+ return 'e'
2452
+ print(df)
2453
+ message = (
2454
+ Fore.GREEN + "Model Saved Successfully\n" +
2455
+ Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
2456
+ Style.RESET_ALL
2457
+ )
2458
+
2459
+ return print(message)
2460
+
2461
+
2462
+ def load_model(model_name,
2463
+ model_path,
2464
+ ):
2465
+ infoload_model = """
2466
+ Function to load a potentiation learning model.
2467
+
2468
+ Arguments:
2469
+ model_name (str): Name of the model.
2470
+ model_path (str): Path where the model is saved.
2471
+
2472
+ Returns:
2473
+ lists: W(list[num]), activation_potentiation, DataFrame of the model
2474
+ """
2475
+ pass
2476
+
2477
+ import scipy.io as sio
2478
+
2479
+ try:
2480
+
2481
+ df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
2482
+
2483
+ except:
2484
+
2485
+ print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
2486
+ infoload_model + Style.RESET_ALL)
2487
+
2488
+ model_name = str(df['MODEL NAME'].iloc[0])
2489
+ layer_count = int(df['LAYER COUNT'].iloc[0])
2490
+ WeightType = str(df['WEIGHTS TYPE'].iloc[0])
2491
+
2492
+ W = [0] * layer_count
2493
+
2494
+ if WeightType == 'txt':
2495
+ for i in range(layer_count):
2496
+ W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
2497
+ elif WeightType == 'npy':
2498
+ for i in range(layer_count):
2499
+ W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
2500
+ elif WeightType == 'mat':
2501
+ for i in range(layer_count):
2502
+ W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
2503
+ else:
2504
+ raise ValueError(
2505
+ Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
2506
+ print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
2507
+ return W, df
2508
+
2509
+
2510
+ def predict_model_ssd(Input, model_name, model_path):
2511
+
2512
+ infopredict_model_ssd = """
2513
+ Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
2514
+
2515
+ Arguments:
2516
+ Input (list or ndarray): Input data for the model (single vector or single matrix).
2517
+ model_name (str): Name of the model.
2518
+ Returns:
2519
+ ndarray: Output from the model.
2520
+ """
2521
+ W, df = load_model(model_name, model_path)
2522
+
2523
+ activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
2524
+
2525
+ if activation_potentiation != 'nan':
2526
+
2527
+ activation_potentiation = float(activation_potentiation)
2528
+
2529
+ else:
2530
+
2531
+ activation_potentiation = None
2532
+
2533
+ try:
2534
+
2535
+ scaler_params = df['STANDARD SCALER'].tolist()
2536
+
2537
+
2538
+ scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
2539
+
2540
+ Input = standard_scaler(None, Input, scaler_params)
2541
+
2542
+ except:
2543
+
2544
+ pass
2545
+
2546
+
2547
+ layers = ['fex']
2548
+
2549
+ Wc = [0] * len(W)
2550
+ for i, w in enumerate(W):
2551
+ Wc[i] = np.copy(w)
2552
+ try:
2553
+ neural_layer = Input
2554
+ neural_layer = np.array(neural_layer)
2555
+ neural_layer = neural_layer.ravel()
2556
+ for index, Layer in enumerate(layers):
2557
+
2558
+ neural_layer = normalization(neural_layer)
2559
+
2560
+ if Layer == 'fex':
2561
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
2562
+ elif Layer == 'cat':
2563
+ neural_layer = np.dot(W[index], neural_layer)
2564
+ except:
2565
+ print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
2566
+ infopredict_model_ssd + Style.RESET_ALL)
2567
+ return 'e'
2568
+ for i, w in enumerate(Wc):
2569
+ W[i] = np.copy(w)
2570
+ return neural_layer
2571
+
2572
+
2573
+ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):
2574
+
2575
+ infopredict_model_ram = """
2576
+ Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
2577
+ from weights and parameters stored in memory.
2578
+
2579
+ Arguments:
2580
+ Input (list or ndarray): Input data for the model (single vector or single matrix).
2581
+ W (list of ndarrays): Weights of the model.
2582
+ scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
2583
+ activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
2584
+
2585
+ Returns:
2586
+ ndarray: Output from the model.
2587
+ """
2588
+ try:
2589
+ if scaler_params != None:
2590
+
2591
+ Input = standard_scaler(None, Input, scaler_params)
2592
+ except:
2593
+ Input = standard_scaler(None, Input, scaler_params)
2594
+
2595
+ layers = ['fex']
2596
+
2597
+ Wc = [0] * len(W)
2598
+ for i, w in enumerate(W):
2599
+ Wc[i] = np.copy(w)
2600
+ try:
2601
+ neural_layer = Input
2602
+ neural_layer = np.array(neural_layer)
2603
+ neural_layer = neural_layer.ravel()
2604
+ for index, Layer in enumerate(layers):
2605
+
2606
+ neural_layer = normalization(neural_layer)
2607
+
2608
+ if Layer == 'fex':
2609
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
2610
+ elif Layer == 'cat':
2611
+ neural_layer = np.dot(W[index], neural_layer)
2612
+
2613
+ except:
2614
+ print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
2615
+ infopredict_model_ram + Style.RESET_ALL)
2616
+ return 'e'
2617
+ for i, w in enumerate(Wc):
2618
+ W[i] = np.copy(w)
2619
+ return neural_layer
2620
+
2621
+
2622
+ def auto_balancer(x_train, y_train):
2623
+
2624
+ infoauto_balancer = """
2625
+ Function to balance the training data across different classes.
2626
+
2627
+ Arguments:
2628
+ x_train (list): Input data for training.
2629
+ y_train (list): Labels corresponding to the input data.
2630
+
2631
+ Returns:
2632
+ tuple: A tuple containing balanced input data and labels.
2633
+ """
2634
+ classes = np.arange(y_train.shape[1])
2635
+ class_count = len(classes)
2636
+
2637
+ try:
2638
+ ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
2639
+ 0] for i in range(class_count)}
2640
+ classes = [len(ClassIndices[i]) for i in range(class_count)]
2641
+
2642
+ if len(set(classes)) == 1:
2643
+ print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
2644
+ return x_train, y_train
2645
+
2646
+ MinCount = min(classes)
2647
+
2648
+ BalancedIndices = []
2649
+ for i in tqdm(range(class_count),leave=False,desc='Balancing Data',ncols=120):
2650
+ if len(ClassIndices[i]) > MinCount:
2651
+ SelectedIndices = np.random.choice(
2652
+ ClassIndices[i], MinCount, replace=False)
2653
+ else:
2654
+ SelectedIndices = ClassIndices[i]
2655
+ BalancedIndices.extend(SelectedIndices)
2656
+
2657
+ BalancedInputs = [x_train[idx] for idx in BalancedIndices]
2658
+ BalancedLabels = [y_train[idx] for idx in BalancedIndices]
2659
+
2660
+ print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
2661
+ ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
2662
+ except:
2663
+ print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
2664
+ return 'e'
2665
+
2666
+ return np.array(BalancedInputs), np.array(BalancedLabels)
2667
+
2668
+
2669
+ def synthetic_augmentation(x_train, y_train):
2670
+ """
2671
+ Generates synthetic examples to balance classes with fewer examples.
2672
+
2673
+ Arguments:
2674
+ x -- Input dataset (examples) - list format
2675
+ y -- Class labels (one-hot encoded) - list format
2676
+
2677
+ Returns:
2678
+ x_balanced -- Balanced input dataset (list format)
2679
+ y_balanced -- Balanced class labels (one-hot encoded, list format)
2680
+ """
2681
+ x = x_train
2682
+ y = y_train
2683
+ classes = np.arange(y_train.shape[1])
2684
+ class_count = len(classes)
2685
+
2686
+ # Calculate class distribution
2687
+ class_distribution = {i: 0 for i in range(class_count)}
2688
+ for label in y:
2689
+ class_distribution[np.argmax(label)] += 1
2690
+
2691
+ max_class_count = max(class_distribution.values())
2692
+
2693
+ x_balanced = list(x)
2694
+ y_balanced = list(y)
2695
+
2696
+ for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data',ncols= 120):
2697
+ class_indices = [i for i, label in enumerate(
2698
+ y) if np.argmax(label) == class_label]
2699
+ num_samples = len(class_indices)
2700
+
2701
+ if num_samples < max_class_count:
2702
+ while num_samples < max_class_count:
2703
+
2704
+ random_indices = np.random.choice(
2705
+ class_indices, 2, replace=False)
2706
+ sample1 = x[random_indices[0]]
2707
+ sample2 = x[random_indices[1]]
2708
+
2709
+ synthetic_sample = sample1 + \
2710
+ (np.array(sample2) - np.array(sample1)) * np.random.rand()
2711
+
2712
+ x_balanced.append(synthetic_sample.tolist())
2713
+ y_balanced.append(y[class_indices[0]])
2714
+
2715
+ num_samples += 1
2716
+
2717
+ return np.array(x_balanced), np.array(y_balanced)
2718
+
2719
+
2720
+ def standard_scaler(x_train, x_test, scaler_params=None):
2721
+ info_standard_scaler = """
2722
+ Standardizes training and test datasets. x_test may be None.
2723
+
2724
+ Args:
2725
+ train_data: numpy.ndarray
2726
+ Training data
2727
+ test_data: numpy.ndarray
2728
+ Test data
2729
+
2730
+ Returns:
2731
+ list:
2732
+ Scaler parameters: mean and std
2733
+ tuple
2734
+ Standardized training and test datasets
2735
+ """
2736
+ try:
2737
+
2738
+ x_train = x_train.tolist()
2739
+ x_test = x_test.tolist()
2740
+
2741
+ except:
2742
+
2743
+ pass
2744
+
2745
+ try:
2746
+
2747
+ if scaler_params == None and x_test != None:
2748
+
2749
+ mean = np.mean(x_train, axis=0)
2750
+ std = np.std(x_train, axis=0)
2751
+ train_data_scaled = (x_train - mean) / std
2752
+ test_data_scaled = (x_test - mean) / std
2753
+
2754
+ train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
2755
+ test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
2756
+
2757
+ scaler_params = [mean, std]
2758
+
2759
+ return scaler_params, train_data_scaled, test_data_scaled
2760
+
2761
+ if scaler_params == None and x_test == None:
2762
+
2763
+ mean = np.mean(x_train, axis=0)
2764
+ std = np.std(x_train, axis=0)
2765
+ train_data_scaled = (x_train - mean) / std
2766
+
2767
+ train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
2768
+
2769
+ scaler_params = [mean, std]
2770
+
2771
+ return scaler_params, train_data_scaled
2772
+
2773
+ if scaler_params != None:
2774
+
2775
+ test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
2776
+ test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
2777
+
2778
+ return test_data_scaled
2779
+
2780
+ except:
2781
+ print(
2782
+ Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
2783
+
2784
+
2785
+ def encode_one_hot(y_train, y_test):
2786
+ info_one_hot_encode = """
2787
+ Performs one-hot encoding on y_train and y_test data..
2788
+
2789
+ Args:
2790
+ y_train (numpy.ndarray): Eğitim etiketi verisi.
2791
+ y_test (numpy.ndarray): Test etiketi verisi.
2792
+
2793
+ Returns:
2794
+ tuple: One-hot encoded y_train ve y_test verileri.
2795
+ """
2796
+
2797
+ classes = np.unique(y_train)
2798
+ class_count = len(classes)
2799
+
2800
+ class_to_index = {cls: idx for idx, cls in enumerate(classes)}
2801
+
2802
+ y_train_encoded = np.zeros((y_train.shape[0], class_count))
2803
+ for i, label in enumerate(y_train):
2804
+ y_train_encoded[i, class_to_index[label]] = 1
2805
+
2806
+ y_test_encoded = np.zeros((y_test.shape[0], class_count))
2807
+ for i, label in enumerate(y_test):
2808
+ y_test_encoded[i, class_to_index[label]] = 1
2809
+
2810
+
2811
+ return y_train_encoded, y_test_encoded
2812
+
2813
+
2814
+ def split(X, y, test_size, random_state):
2815
+ """
2816
+ Splits the given X (features) and y (labels) data into training and testing subsets.
2817
+
2818
+ Args:
2819
+ X (numpy.ndarray): Features data.
2820
+ y (numpy.ndarray): Labels data.
2821
+ test_size (float or int): Proportion or number of samples for the test subset.
2822
+ random_state (int or None): Seed for random state.
2823
+
2824
+ Returns:
2825
+ tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
2826
+ """
2827
+ num_samples = X.shape[0]
2828
+
2829
+ if isinstance(test_size, float):
2830
+ test_size = int(test_size * num_samples)
2831
+ elif isinstance(test_size, int):
2832
+ if test_size > num_samples:
2833
+ raise ValueError(
2834
+ "test_size cannot be larger than the number of samples.")
2835
+ else:
2836
+ raise ValueError("test_size should be float or int.")
2837
+
2838
+ if random_state is not None:
2839
+ np.random.seed(random_state)
2840
+
2841
+ indices = np.arange(num_samples)
2842
+ np.random.shuffle(indices)
2843
+
2844
+ test_indices = indices[:test_size]
2845
+ train_indices = indices[test_size:]
2846
+
2847
+ x_train, x_test = X[train_indices], X[test_indices]
2848
+ y_train, y_test = y[train_indices], y[test_indices]
2849
+
2850
+ return x_train, x_test, y_train, y_test
2851
+
2852
+
2853
+ def metrics(y_ts, test_preds, average='weighted'):
2854
+ """
2855
+ Calculates precision, recall and F1 score for a classification task.
2856
+
2857
+ Args:
2858
+ y_ts (list or numpy.ndarray): True labels.
2859
+ test_preds (list or numpy.ndarray): Predicted labels.
2860
+ average (str): Type of averaging ('micro', 'macro', 'weighted').
2861
+
2862
+ Returns:
2863
+ tuple: Precision, recall, F1 score.
2864
+ """
2865
+ y_test_d = decode_one_hot(y_ts)
2866
+ y_test_d = np.array(y_test_d)
2867
+ y_pred = np.array(test_preds)
2868
+
2869
+ if y_test_d.ndim > 1:
2870
+ y_test_d = y_test_d.reshape(-1)
2871
+ if y_pred.ndim > 1:
2872
+ y_pred = y_pred.reshape(-1)
2873
+
2874
+ tp = {}
2875
+ fp = {}
2876
+ fn = {}
2877
+
2878
+ classes = np.unique(np.concatenate((y_test_d, y_pred)))
2879
+
2880
+ for c in classes:
2881
+ tp[c] = 0
2882
+ fp[c] = 0
2883
+ fn[c] = 0
2884
+
2885
+ for c in classes:
2886
+ for true, pred in zip(y_test_d, y_pred):
2887
+ if true == c and pred == c:
2888
+ tp[c] += 1
2889
+ elif true != c and pred == c:
2890
+ fp[c] += 1
2891
+ elif true == c and pred != c:
2892
+ fn[c] += 1
2893
+
2894
+ precision = {}
2895
+ recall = {}
2896
+ f1 = {}
2897
+
2898
+ for c in classes:
2899
+ precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
2900
+ recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
2901
+ f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
2902
+
2903
+ if average == 'micro':
2904
+ precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
2905
+ recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
2906
+ f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
2907
+
2908
+ elif average == 'macro':
2909
+ precision_val = np.mean(list(precision.values()))
2910
+ recall_val = np.mean(list(recall.values()))
2911
+ f1_val = np.mean(list(f1.values()))
2912
+
2913
+ elif average == 'weighted':
2914
+ weights = np.array([np.sum(y_test_d == c) for c in classes])
2915
+ weights = weights / np.sum(weights)
2916
+ precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
2917
+ recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
2918
+ f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
2919
+
2920
+ else:
2921
+ raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
2922
+
2923
+ return precision_val, recall_val, f1_val
2924
+
2925
+
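A small worked example; y_ts must be one-hot encoded (it is passed through decode_one_hot), while test_preds are plain class indices. Values rounded:

    import numpy as np

    y_ts = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])   # decodes to [0, 1, 1, 0]
    test_preds = np.array([0, 1, 0, 0])

    precision, recall, f1 = metrics(y_ts, test_preds, average='weighted')
    # class 0: precision 2/3, recall 1.0; class 1: precision 1.0, recall 0.5
    # weighted (equal class counts): precision 0.833, recall 0.75, f1 0.733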
2926
+ def decode_one_hot(encoded_data):
2927
+ """
2928
+ Decodes one-hot encoded data to original categorical labels.
2929
+
2930
+ Args:
2931
+ encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
2932
+
2933
+ Returns:
2934
+ numpy.ndarray: Decoded categorical labels with shape (n_samples,).
2935
+ """
2936
+
2937
+ decoded_labels = np.argmax(encoded_data, axis=1)
2938
+
2939
+ return decoded_labels
2940
+
2941
+
2942
+ def roc_curve(y_true, y_score):
2943
+ """
2944
+ Compute Receiver Operating Characteristic (ROC) curve.
2945
+
2946
+ Parameters:
2947
+ y_true : array, shape = [n_samples]
2948
+ True binary labels in {0, 1} (the positive/negative counts below assume 0/1 labels).
2949
+ y_score : array, shape = [n_samples]
2950
+ Target scores, can either be probability estimates of the positive class,
2951
+ confidence values, or non-thresholded measure of decisions (as returned
2952
+ by decision_function on some classifiers).
2953
+
2954
+ Returns:
2955
+ fpr : array, shape = [n]
2956
+ Increasing false positive rates such that element i is the false positive rate
2957
+ of predictions with score >= thresholds[i].
2958
+ tpr : array, shape = [n]
2959
+ Increasing true positive rates such that element i is the true positive rate
2960
+ of predictions with score >= thresholds[i].
2961
+ thresholds : array, shape = [n]
2962
+ Decreasing thresholds on the decision function used to compute fpr and tpr.
2963
+ """
2964
+
2965
+ y_true = np.asarray(y_true)
2966
+ y_score = np.asarray(y_score)
2967
+
2968
+ if len(np.unique(y_true)) != 2:
2969
+ raise ValueError("Only binary classification is supported.")
2970
+
2971
+
2972
+ desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
2973
+ y_score = y_score[desc_score_indices]
2974
+ y_true = y_true[desc_score_indices]
2975
+
2976
+
2977
+ fpr = []
2978
+ tpr = []
2979
+ thresholds = []
2980
+ n_pos = np.sum(y_true)
2981
+ n_neg = len(y_true) - n_pos
2982
+
2983
+ tp = 0
2984
+ fp = 0
2985
+ prev_score = None
2986
+
2987
+
2988
+ for i, score in enumerate(y_score):
2989
+ if score != prev_score:
2990
+ fpr.append(fp / n_neg)
2991
+ tpr.append(tp / n_pos)
2992
+ thresholds.append(score)
2993
+ prev_score = score
2994
+
2995
+ if y_true[i] == 1:
2996
+ tp += 1
2997
+ else:
2998
+ fp += 1
2999
+
3000
+ fpr.append(fp / n_neg)
3001
+ tpr.append(tp / n_pos)
3002
+ thresholds.append(score)
3003
+
3004
+ return np.array(fpr), np.array(tpr), np.array(thresholds)
3005
+
3006
+
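The sweep emits one (fpr, tpr) point per distinct score, plus a final point once every sample has been counted. On a classic four-sample input:

    import numpy as np

    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])

    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    # fpr: [0.  0.  0.5 0.5 1. ]
    # tpr: [0.  0.5 0.5 1.  1. ]
    print(np.trapz(tpr, fpr))  # AUC = 0.75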
3007
+ def confusion_matrix(y_true, y_pred, class_count):
3008
+ """
3009
+ Computes confusion matrix.
3010
+
3011
+ Args:
3012
+ y_true (numpy.ndarray): True class labels (1D array).
3013
+ y_pred (numpy.ndarray): Predicted class labels (1D array).
3014
+ class_count (int): Number of classes.
3015
+
3016
+ Returns:
3017
+ numpy.ndarray: Confusion matrix of shape (class_count, class_count).
3018
+ """
3019
+ confusion = np.zeros((class_count, class_count), dtype=int)
3020
+
3021
+ for i in range(len(y_true)):
3022
+ true_label = y_true[i]
3023
+ pred_label = y_pred[i]
3024
+ confusion[true_label, pred_label] += 1
3025
+
3026
+ return confusion
3027
+
3028
+
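Rows index the true class and columns the predicted class, so off-diagonal mass shows where labels leak. For example:

    import numpy as np

    y_true = np.array([0, 1, 2, 2, 1])
    y_pred = np.array([0, 2, 2, 2, 1])

    print(confusion_matrix(y_true, y_pred, class_count=3))
    # [[1 0 0]
    #  [0 1 1]
    #  [0 0 2]]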
3029
+ def plot_evaluate(y_test, y_preds, acc_list):
3030
+
3031
+ acc = acc_list[-1]
3032
+ y_true = decode_one_hot(y_test)
3033
+
3034
+ y_true = np.array(y_true)
3035
+ y_preds = np.array(y_preds)
3036
+ Class = np.unique(y_true)
3037
+
3038
+ precision, recall, f1 = metrics(y_test, y_preds)
3039
+
3040
+
3041
+ # Confusion matrix
3042
+ cm = confusion_matrix(y_true, y_preds, len(Class))
3043
+ fig, axs = plt.subplots(2, 2, figsize=(16, 12))
3044
+
3045
+ # Confusion Matrix
3046
+ sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
3047
+ axs[0, 0].set_title("Confusion Matrix")
3048
+ axs[0, 0].set_xlabel("Predicted Class")
3049
+ axs[0, 0].set_ylabel("Actual Class")
3050
+
3051
+ if len(Class) == 2:
3052
+ fpr, tpr, thresholds = roc_curve(y_true, y_preds)
3053
+ # ROC Curve
3054
+ roc_auc = np.trapz(tpr, fpr)
3055
+ axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
3056
+ axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
3057
+ axs[1, 0].set_xlim([0.0, 1.0])
3058
+ axs[1, 0].set_ylim([0.0, 1.05])
3059
+ axs[1, 0].set_xlabel('False Positive Rate')
3060
+ axs[1, 0].set_ylabel('True Positive Rate')
3061
+ axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
3062
+ axs[1, 0].legend(loc="lower right")
3064
+ else:
3065
+
3066
+ for i in range(len(Class)):
3067
+
3068
+ # one-vs-rest: treat class i as positive (1) and every other class as negative (0)
3069
+ y_true_copy = (y_true == i).astype(int)
3070
+ y_preds_copy = (y_preds == i).astype(int)
3075
+ y_preds_copy[y_preds_copy != 0] = 1
3076
+
3077
+
3078
+ fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
3079
+
3080
+ roc_auc = np.trapz(tpr, fpr)
3081
+ axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
3082
+ axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
3083
+ axs[1, 0].set_xlim([0.0, 1.0])
3084
+ axs[1, 0].set_ylim([0.0, 1.05])
3085
+ axs[1, 0].set_xlabel('False Positive Rate')
3086
+ axs[1, 0].set_ylabel('True Positive Rate')
3087
+ axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
3088
+ axs[1, 0].legend(loc="lower right")
3090
+
3091
+
3092
+ """
3093
+ accuracy_per_class = []
3094
+
3095
+ for cls in Class:
3096
+ correct = np.sum((y_true == cls) & (y_preds == cls))
3097
+ total = np.sum(y_true == cls)
3098
+ accuracy_cls = correct / total if total > 0 else 0.0
3099
+ accuracy_per_class.append(accuracy_cls)
3100
+
3101
+ axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
3102
+ axs[2, 0].set_xlabel('Class')
3103
+ axs[2, 0].set_ylabel('Accuracy')
3104
+ axs[2, 0].set_title('Class-wise Accuracy')
3105
+ axs[2, 0].set_xticks(Class)
3106
+ axs[2, 0].grid(True)
3107
+ """
3108
+
3109
+
3110
+
3111
+
3112
+ # Precision, Recall, F1 Score, Accuracy
3113
+ metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
3114
+ values = [precision, recall, f1, acc]
3115
+ colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
3116
+
3118
+ bars = axs[0, 1].bar(metric, values, color=colors)
3119
+
3120
+
3121
+ for bar, value in zip(bars, values):
3122
+ axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
3123
+ ha='center', va='bottom', fontsize=12, color='white', weight='bold')
3124
+
3125
+ axs[0, 1].set_ylim(0, 1) # limit the y-axis to the 0-1 range
3126
+ axs[0, 1].set_xlabel('Metrics')
3127
+ axs[0, 1].set_ylabel('Score')
3128
+ axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
3129
+ axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
3130
+
3131
+ # Accuracy history on the remaining subplot
3132
+ axs[1, 1].plot(acc_list, marker='o', linestyle='-',
3133
+ color='r', label='Accuracy')
3134
+
3135
+
3136
+ axs[1, 1].axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
3137
+
3138
+
3139
+ axs[1, 1].set_xlabel('Samples')
3140
+ axs[1, 1].set_ylabel('Accuracy')
3141
+ axs[1, 1].set_title('Accuracy History')
3142
+ axs[1, 1].legend()
3143
+
3144
+
3145
+ plt.tight_layout()
3146
+ plt.show()
3147
+
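plot_evaluate expects the one-hot test labels, the decoded predictions, and the running per-sample accuracy history whose last entry feeds the metrics bar chart. A hypothetical call (variable names illustrative):

    plot_evaluate(y_test_encoded, test_preds, acc_list=[0.70, 0.80, 0.85])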
3148
+ def manuel_balancer(x_train, y_train, target_samples_per_class):
3149
+ """
3150
+ Generates synthetic examples to balance classes to the specified number of examples per class.
3151
+
3152
+ Arguments:
3153
+ x_train -- Input dataset (examples) - NumPy array format
3154
+ y_train -- Class labels (one-hot encoded) - NumPy array format
3155
+ target_samples_per_class -- Desired number of samples per class
3156
+
3157
+ Returns:
3158
+ x_balanced -- Balanced input dataset (NumPy array format)
3159
+ y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
3160
+ """
3161
+ try:
3162
+ x_train = np.array(x_train)
3163
+ y_train = np.array(y_train)
3164
+ except:
3165
+ print(Fore.GREEN + "x_train and y_train are already numpy arrays." + Style.RESET_ALL)
3166
+ pass
3167
+ classes = np.arange(y_train.shape[1])
3168
+ class_count = len(classes)
3169
+
3170
+ x_balanced = []
3171
+ y_balanced = []
3172
+
3173
+ for class_label in tqdm(range(class_count),leave=False, desc='Augmenting Data',ncols= 120):
3174
+ class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
3175
+ num_samples = len(class_indices)
3176
+
3177
+ if num_samples > target_samples_per_class:
3178
+
3179
+ selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
3180
+ x_balanced.append(x_train[selected_indices])
3181
+ y_balanced.append(y_train[selected_indices])
3182
+
3183
+ else:
3184
+
3185
+ x_balanced.append(x_train[class_indices])
3186
+ y_balanced.append(y_train[class_indices])
3187
+
3188
+ if num_samples < target_samples_per_class:
3189
+
3190
+ samples_to_add = target_samples_per_class - num_samples
3191
+ additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
3192
+ additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
3193
+
3194
+ for i in range(samples_to_add):
3195
+
3196
+ random_indices = np.random.choice(class_indices, 2, replace=False)
3197
+ sample1 = x_train[random_indices[0]]
3198
+ sample2 = x_train[random_indices[1]]
3199
+
3200
+
3201
+ synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
3202
+
3203
+ additional_samples[i] = synthetic_sample
3204
+ additional_labels[i] = y_train[class_indices[0]]
3205
+
3206
+
3207
+ x_balanced.append(additional_samples)
3208
+ y_balanced.append(additional_labels)
3209
+
1599
3210
  x_balanced = np.vstack(x_balanced)
1600
3211
  y_balanced = np.vstack(y_balanced)
1601
3212