pyerualjetwork 1.3.1__py3-none-any.whl → 1.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
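
The substantive change in plan/plan.py is a rename of the public identifiers from PascalCase to snake_case (TrainInputs → x_train, TrainLabels → y_train, MembranThresholds → membran_thresholds, and so on), plus one new helper, SyntheticAugmentation, shown near the end of the diff. For orientation, a minimal sketch of a TrainPLAN call under the 1.3.2 names — the import path is inferred from the wheel's plan/plan.py layout, and every value below is illustrative rather than taken from package documentation:

from plan.plan import TrainPLAN  # import path assumed from the wheel layout

# x_train / y_train are assumed already prepared: input vectors and one-hot labels.
trained_ws, train_preds, train_acc = TrainPLAN(
    x_train, y_train,
    class_count=3,
    layers=['fex', 'cat'],           # 'fex' = Feature Extraction, 'cat' = Catalyser
    neurons=[6, 3],                  # last entry must equal class_count (ERROR108)
    membran_thresholds=['<', '=='],  # signs the ERROR104 message suggests per layer type
    membran_potentials=[0.5, 0],
    normalizations=['y', 'n'],
    activations=['none', 'none'],
    visualize='n'
)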
plan/plan.py CHANGED
@@ -9,64 +9,64 @@ import seaborn as sns

  # BUILD -----
  def TrainPLAN(
- TrainInputs: List[Union[int, float]],
- TrainLabels: List[Union[int, float, str]], # At least two.. and one hot encoded
- ClassCount: int,
- Layers: List[str],
- Neurons: List[Union[int, float]],
- MembranThresholds: List[str],
- MembranPotentials: List[Union[int, float]],
- Normalizations: List[str],
- Activations: List[str],
- Visualize: str
+ x_train: List[Union[int, float]],
+ y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
+ class_count: int,
+ layers: List[str],
+ neurons: List[Union[int, float]],
+ membran_thresholds: List[str],
+ membran_potentials: List[Union[int, float]],
+ normalizations: List[str],
+ activations: List[str],
+ visualize: str
  ) -> str:

  infoPLAN = """
  Creates and configures a PLAN model.

  Args:
- TrainInputs (list[num]): List of input data.
- TrainLabels (list[num]): List of TrainLabels. (one hot encoded)
- ClassCount (int): Number of classes.
- Layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
- Neurons (list[num]): List of neuron counts for each layer.
- MembranThresholds (list[str]): List of MembranThresholds.
- MembranPotentials (list[num]): List of MembranPotentials.
- Normalizations (List[str]): Whether normalization will be performed at indexed layers ("y" or "n").
- Activations (list[str]): List of activation functions.
- Visualize (str): Visualize Training procces or not visualize ('y' or 'n')
+ x_train (list[num]): List of input data.
+ y_train (list[num]): List of y_train. (one hot encoded)
+ class_count (int): Number of classes.
+ layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
+ neurons (list[num]): List of neuron counts for each layer.
+ membran_thresholds (list[str]): List of membran_thresholds.
+ membran_potentials (list[num]): List of membran_potentials.
+ normalizations (List[str]): Whether normalization will be performed at indexed layers ("y" or "n").
+ activations (list[str]): List of activation functions.
+ visualize (str): visualize Training procces or not visualize ('y' or 'n')

  Returns:
- list([num]): (Weight matrices list, TrainPredictions list, TrainAcc).
+ list([num]): (Weight matrices list, TrainPredictions list, Trainacc).
  error handled ?: Process status ('e')
  """

- if Visualize != 'y' and Visualize != 'n':
- print(Fore.RED + "ERROR109: Visualize parameter must be 'y' or 'n'. TrainPLAN",infoPLAN)
+ if visualize != 'y' and visualize != 'n':
+ print(Fore.RED + "ERROR109: visualize parameter must be 'y' or 'n'. TrainPLAN",infoPLAN)
  return 'e'


- LastNeuron = Neurons[-1:][0]
- if LastNeuron != ClassCount:
+ LastNeuron = neurons[-1:][0]
+ if LastNeuron != class_count:
  print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
  return 'e'

- if len(Normalizations) != len(MembranPotentials):
+ if len(normalizations) != len(membran_potentials):

- print(Fore.RED + "ERROR307: Normalization list length must be equal to length of MembranThresholds List,MembranPotentials List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
+ print(Fore.RED + "ERROR307: Normalization list length must be equal to length of membran_thresholds List,membran_potentials List,layers List,neurons List. from: TrainPLAN",infoPLAN)
  return 'e'

- if len(TrainInputs) != len(TrainLabels):
- print(Fore.RED + "ERROR301: TrainInputs list and TrainLabels list must be same length.",infoPLAN)
+ if len(x_train) != len(y_train):
+ print(Fore.RED + "ERROR301: x_train list and y_train list must be same length.",infoPLAN)
  return 'e'

- for i, Value in enumerate(MembranPotentials):
+ for i, Value in enumerate(membran_potentials):

- if Normalizations[i] != 'y' and Normalizations[i] != 'n':
+ if normalizations[i] != 'y' and normalizations[i] != 'n':
  print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
  return 'e'

- if MembranThresholds[i] == 'none':
+ if membran_thresholds[i] == 'none':
  print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
  time.sleep(3)

@@ -74,140 +74,140 @@ def TrainPLAN(
  print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
  return 'e'

- if isinstance(Neurons[i], str):
- print(Fore.RED + "ERROR202: Neurons list must be numeric.")
+ if isinstance(neurons[i], str):
+ print(Fore.RED + "ERROR202: neurons list must be numeric.")
  return 'e'

- if len(MembranThresholds) != len(MembranPotentials):
+ if len(membran_thresholds) != len(membran_potentials):
  print(Fore.RED + "ERROR302: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
  return 'e'

- if len(Layers) != len(Neurons):
- print(Fore.RED + "ERROR303: Layers list and Neurons list must same length. from: TrainPLAN",infoPLAN)
+ if len(layers) != len(neurons):
+ print(Fore.RED + "ERROR303: layers list and neurons list must same length. from: TrainPLAN",infoPLAN)
  return 'e'

- if len(MembranPotentials) != len(Layers) or len(MembranThresholds) != len(Layers):
- print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same Layers list length. from: TrainPLAN",infoPLAN)
+ if len(membran_potentials) != len(layers) or len(membran_thresholds) != len(layers):
+ print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same layers list length. from: TrainPLAN",infoPLAN)
  return 'e'


- for Activation in Activations:
+ for Activation in activations:
  if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
- print(Fore.RED + "ERROR108: Activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
+ print(Fore.RED + "ERROR108: activations list must be 'sigmoid' or 'softmax' or 'relu' or 'none' from: TrainPLAN",infoPLAN)
  return 'e'


- for index, Neuron in enumerate(Neurons):
+ for index, Neuron in enumerate(neurons):
  if Neuron < 1:
- print(Fore.RED + "ERROR101: Neurons list must be positive non zero integer. from: TrainPLAN",infoPLAN)
+ print(Fore.RED + "ERROR101: neurons list must be positive non zero integer. from: TrainPLAN",infoPLAN)
  return 'e'

- if index + 1 != len(Neurons) and Neuron % 2 != 0:
+ if index + 1 != len(neurons) and Neuron % 2 != 0:
  print(Fore.MAGENTA + "WARNING101: We strongly advise to do Neuron counts be should even numbers. from: TrainPLAN",infoPLAN)
  time.sleep(3)

- if Neuron < ClassCount:
+ if Neuron < class_count:
  print(Fore.RED + "ERROR102: Neuron count must be greater than class count(For PLAN). from: TrainPLAN")
  return 'e'

- if Layers[index] != 'fex' and Layers[index] != 'cat':
- print(Fore.RED + "ERROR107: Layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
+ if layers[index] != 'fex' and layers[index] != 'cat':
+ print(Fore.RED + "ERROR107: layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
  return 'e'

- if len(MembranThresholds) != len(MembranPotentials):
+ if len(membran_thresholds) != len(membran_potentials):
  print(Fore.RED + "ERROR305: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
  return 'e'


- for i, Sign in enumerate(MembranThresholds):
+ for i, Sign in enumerate(membran_thresholds):
  if Sign != '>' and Sign != '<' and Sign != '==' and Sign != '!=' and Sign != 'none':
  print(Fore.RED + "ERROR104: MEMBRAN THRESHOLDS must be '>' or '<' or '==' or '!='. or 'none' WE SUGGEST '<' FOR FEX LAYER AND '==' FOR CAT LAYER (Your data, your hyperparameter) from: TrainPLAN",infoPLAN)
  return 'e'

- if Layers[i] == 'fex' and Sign == 'none':
+ if layers[i] == 'fex' and Sign == 'none':
  print(Fore.RED + "ERROR109: at layer type 'fex', pairing with 'none' Threshold is not acceptlable. if you want to 'none' put '==' and make threshold value '0'. from: TrainPLAN ",infoPLAN)
  return 'e'

- UniqueTrainLabels = set()
- for sublist in TrainLabels:
+ Uniquey_train = set()
+ for sublist in y_train:

- UniqueTrainLabels.add(tuple(sublist))
+ Uniquey_train.add(tuple(sublist))


- UniqueTrainLabels = list(UniqueTrainLabels)
+ Uniquey_train = list(Uniquey_train)

- TrainLabels = [tuple(sublist) for sublist in TrainLabels]
+ y_train = [tuple(sublist) for sublist in y_train]


- if len(UniqueTrainLabels) != ClassCount:
+ if len(Uniquey_train) != class_count:
  print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
  return 'e'

- TrainInputs[0] = np.array(TrainInputs[0])
- TrainInputs[0] = TrainInputs[0].ravel()
- TrainInputsize = len(TrainInputs[0])
+ x_train[0] = np.array(x_train[0])
+ x_train[0] = x_train[0].ravel()
+ x_train_size = len(x_train[0])

- W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,TrainInputsize)
- Divides = SynapticDividing(ClassCount,W)
+ W = WeightIdentification(len(layers) - 1,class_count,neurons,x_train_size)
+ Divides = SynapticDividing(class_count,W)
  TrainedWs = [1] * len(W)
  print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
- TrainPredictions = [None] * len(TrainLabels)
+ TrainPredictions = [None] * len(y_train)
  true = 0
- StartTime = time.time()
- for index, inp in enumerate(TrainInputs):
- UniStartTime = time.time()
+ start_time = time.time()
+ for index, inp in enumerate(x_train):
+ uni_start_time = time.time()
  inp = np.array(inp)
  inp = inp.ravel()

- if TrainInputsize != len(inp):
- print(Fore.RED +"ERROR304: All input matrices or vectors in TrainInputs list, must be same size. from: TrainPLAN",infoPLAN + Style.RESET_ALL)
+ if x_train_size != len(inp):
+ print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from: TrainPLAN",infoPLAN + Style.RESET_ALL)
  return 'e'


- for Ulindex, Ul in enumerate(UniqueTrainLabels):
+ for Ulindex, Ul in enumerate(Uniquey_train):

- if Ul == TrainLabels[index]:
+ if Ul == y_train[index]:
  for Windex, w in enumerate(W):
  for i, ul in enumerate(Ul):
  if ul == 1.0:
  k = i
- Cs = Divides[int(k)][Windex][0]
+ cs = Divides[int(k)][Windex][0]

- W[Windex] = SynapticPruning(w, Cs, 'row', int(k),ClassCount)
+ W[Windex] = SynapticPruning(w, cs, 'row', int(k),class_count)

- NeuralLayer = inp
+ neural_layer = inp

- for Lindex, Layer in enumerate(Layers):
+ for Lindex, Layer in enumerate(layers):

- if Normalizations[Lindex] == 'y':
- NeuralLayer = Normalization(NeuralLayer)
+ if normalizations[Lindex] == 'y':
+ neural_layer = Normalization(neural_layer)

- if Activations[Lindex] == 'relu':
- NeuralLayer = Relu(NeuralLayer)
- elif Activations[Lindex] == 'sigmoid':
- NeuralLayer = Sigmoid(NeuralLayer)
- elif Activations[Lindex] == 'softmax':
- NeuralLayer = Softmax(NeuralLayer)
+ if activations[Lindex] == 'relu':
+ neural_layer = Relu(neural_layer)
+ elif activations[Lindex] == 'sigmoid':
+ neural_layer = Sigmoid(neural_layer)
+ elif activations[Lindex] == 'softmax':
+ neural_layer = Softmax(neural_layer)

  if Layer == 'fex':
- NeuralLayer,W[Lindex] = Fex(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex])
+ neural_layer,W[Lindex] = Fex(neural_layer, W[Lindex], membran_thresholds[Lindex], membran_potentials[Lindex])
  elif Layer == 'cat':
- NeuralLayer,W[Lindex] = Cat(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex],1)
+ neural_layer,W[Lindex] = Cat(neural_layer, W[Lindex], membran_thresholds[Lindex], membran_potentials[Lindex],1)

- RealOutput = np.argmax(TrainLabels[index])
- PredictedOutput = np.argmax(NeuralLayer)
+ RealOutput = np.argmax(y_train[index])
+ PredictedOutput = np.argmax(neural_layer)
  if RealOutput == PredictedOutput:
  true += 1
- Acc = true / len(TrainLabels)
+ acc = true / len(y_train)
  TrainPredictions[index] = PredictedOutput

- if Visualize == 'y':
+ if visualize == 'y':

- TrainLabelsVisual = np.copy(TrainLabels)
- TrainLabelsVisual = np.argmax(TrainLabelsVisual, axis=1)
+ y_trainVisual = np.copy(y_train)
+ y_trainVisual = np.argmax(y_trainVisual, axis=1)

  plt.figure(figsize=(12, 6))
- sns.kdeplot(TrainLabelsVisual, label='Real Outputs', fill=True)
+ sns.kdeplot(y_trainVisual, label='Real Outputs', fill=True)
  sns.kdeplot(TrainPredictions, label='Predictions', fill=True)
  plt.legend()
  plt.xlabel('Class')
@@ -215,7 +215,7 @@ def TrainPLAN(
  plt.title('Predictions and Real Outputs for Training KDE Plot')
  plt.show()

- if index + 1 != len(TrainInputs):
+ if index + 1 != len(x_train):

  plt.close('all')

@@ -228,91 +228,91 @@ def TrainPLAN(
  TrainedWs[i] = TrainedWs[i] + w


- W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,TrainInputsize)
+ W = WeightIdentification(len(layers) - 1,class_count,neurons,x_train_size)


- UniEndTime = time.time()
+ uni_end_time = time.time()

- CalculatingEst = round((UniEndTime - UniStartTime) * (len(TrainInputs) - index),3)
+ calculating_est = round((uni_end_time - uni_start_time) * (len(x_train) - index),3)

- if CalculatingEst < 60:
- print('\rest......(sec):',CalculatingEst,'\n',end= "")
- print('\rTrain Accuracy: ' ,Acc ,"\n", end="")
+ if calculating_est < 60:
+ print('\rest......(sec):',calculating_est,'\n',end= "")
+ print('\rTrain accuracy: ' ,acc ,"\n", end="")

- elif CalculatingEst > 60 and CalculatingEst < 3600:
- print('\rest......(min):',CalculatingEst/60,'\n',end= "")
- print('\rTrain Accuracy: ' ,Acc ,"\n", end="")
+ elif calculating_est > 60 and calculating_est < 3600:
+ print('\rest......(min):',calculating_est/60,'\n',end= "")
+ print('\rTrain accuracy: ' ,acc ,"\n", end="")

- elif CalculatingEst > 3600:
- print('\rest......(h):',CalculatingEst/3600,'\n',end= "")
- print('\rTrain Accuracy: ' ,Acc ,"\n", end="")
+ elif calculating_est > 3600:
+ print('\rest......(h):',calculating_est/3600,'\n',end= "")
+ print('\rTrain accuracy: ' ,acc ,"\n", end="")

  EndTime = time.time()

- CalculatingEst = round(EndTime - StartTime,2)
+ calculating_est = round(EndTime - start_time,2)

  print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")

- if CalculatingEst < 60:
- print('Total training time(sec): ',CalculatingEst)
+ if calculating_est < 60:
+ print('Total training time(sec): ',calculating_est)

- elif CalculatingEst > 60 and CalculatingEst < 3600:
- print('Total training time(min): ',CalculatingEst/60)
+ elif calculating_est > 60 and calculating_est < 3600:
+ print('Total training time(min): ',calculating_est/60)

- elif CalculatingEst > 3600:
- print('Total training time(h): ',CalculatingEst/3600)
+ elif calculating_est > 3600:
+ print('Total training time(h): ',calculating_est/3600)

- if Acc > 0.8:
- print(Fore.GREEN + '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
+ if acc > 0.8:
+ print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)

- elif Acc < 0.8 and Acc > 0.6:
- print(Fore.MAGENTA + '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
+ elif acc < 0.8 and acc > 0.6:
+ print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)

- elif Acc < 0.6:
- print(Fore.RED+ '\nTotal Train Accuracy: ' ,Acc, '\n',Style.RESET_ALL)
+ elif acc < 0.6:
+ print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)




- return TrainedWs,TrainPredictions,Acc
+ return TrainedWs,TrainPredictions,acc

  # FUNCTIONS -----

  def WeightIdentification(
- LayerCount, # int: Number of layers in the neural network.
- ClassCount, # int: Number of classes in the classification task.
- Neurons, # list[num]: List of neuron counts for each layer.
- TrainInputsize # int: Size of the input data.
+ layer_count, # int: Number of layers in the neural network.
+ class_count, # int: Number of classes in the classification task.
+ neurons, # list[num]: List of neuron counts for each layer.
+ x_train_size # int: Size of the input data.
  ) -> str:
  """
  Identifies the weights for a neural network model.

  Args:
- LayerCount (int): Number of layers in the neural network.
- ClassCount (int): Number of classes in the classification task.
- Neurons (list[num]): List of neuron counts for each layer.
- TrainInputsize (int): Size of the input data.
+ layer_count (int): Number of layers in the neural network.
+ class_count (int): Number of classes in the classification task.
+ neurons (list[num]): List of neuron counts for each layer.
+ x_train_size (int): Size of the input data.

  Returns:
  list([numpy_arrays],[...]): Weight matices of the model. .
  """


- Wlen = LayerCount + 1
+ Wlen = layer_count + 1
  W = [None] * Wlen
- W[0] = np.ones((Neurons[0],TrainInputsize))
- ws = LayerCount - 1
+ W[0] = np.ones((neurons[0],x_train_size))
+ ws = layer_count - 1
  for w in range(ws):
- W[w + 1] = np.ones((Neurons[w + 1],Neurons[w]))
- W[LayerCount] = np.ones((ClassCount,Neurons[LayerCount - 1]))
+ W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
+ W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
  return W

  def SynapticPruning(
  w, # list[list[num]]: Weight matrix of the neural network.
- Cs, # list[list[num]]: Synaptic connections between neurons.
- Key, # int: Key for identifying synaptic connections.
+ cs, # list[list[num]]: Synaptic connections between neurons.
+ key, # int: key for identifying synaptic connections.
  Class, # int: Class label for the current training instance.
- ClassCount # int: Total number of classes in the dataset.
+ class_count # int: Total number of classes in the dataset.

  ) -> str:
  infoPruning = """
@@ -320,10 +320,10 @@ def SynapticPruning(

  Args:
  w (list[list[num]]): Weight matrix of the neural network.
- Cs (list[list[num]]): Synaptic connections between neurons.
- Key (str): Key for identifying synaptic row or col connections.
+ cs (list[list[num]]): Synaptic connections between neurons.
+ key (str): key for identifying synaptic row or col connections.
  Class (int): Class label for the current training instance.
- ClassCount (int): Total number of classes in the dataset.
+ class_count (int): Total number of classes in the dataset.

  Returns:
  numpy array: Weight matrix.
@@ -332,55 +332,55 @@ def SynapticPruning(

  Class += 1 # because index start 0

- if Class != ClassCount and Class != 1:
+ if Class != class_count and Class != 1:

- Ce = Cs / Class
+ ce = cs / Class

- w[int(Ce)-1::-1,:] = 0
+ w[int(ce)-1::-1,:] = 0

- w[Cs:,:] = 0
+ w[cs:,:] = 0


  else:

  if Class == 1:
- if Key == 'row':
+ if key == 'row':

- w[Cs:,:] = 0
+ w[cs:,:] = 0

- elif Key == 'col':
+ elif key == 'col':

- w[:,Cs] = 0
+ w[:,cs] = 0

  else:
- print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning)
+ print(Fore.RED + "ERROR103: SynapticPruning func's key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning)
  return 'e'
  else:
- if Key == 'row':
+ if key == 'row':

- w[Cs:,:] = 0
+ w[cs:,:] = 0

- Ce = int(round(w.shape[0] - Cs / ClassCount))
- w[Ce-1::-1,:] = 0
+ ce = int(round(w.shape[0] - cs / class_count))
+ w[ce-1::-1,:] = 0

- elif Key == 'col':
+ elif key == 'col':

- w[:,Cs] = 0
+ w[:,cs] = 0

  else:
- print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning + Style.RESET_ALL)
+ print(Fore.RED + "ERROR103: SynapticPruning func's key parameter must be 'row' or 'col' from: SynapticPruning" + infoPruning + Style.RESET_ALL)
  return 'e'
  return w

  def SynapticDividing(
- ClassCount, # int: Total number of classes in the dataset.
+ class_count, # int: Total number of classes in the dataset.
  W # list[list[num]]: Weight matrix of the neural network.
  ) -> str:
  """
  Divides the synaptic weights of a neural network model based on class count.

  Args:
- ClassCount (int): Total number of classes in the dataset.
+ class_count (int): Total number of classes in the dataset.
  W (list[list[num]]): Weight matrix of the neural network.

  Returns:
@@ -392,25 +392,25 @@ def SynapticDividing(
  #print('Piece:' + Piece)
  #input()
  # Create an empty three-dimensional list
- Divides = [[[0] for _ in range(len(W))] for _ in range(ClassCount)]
+ Divides = [[[0] for _ in range(len(W))] for _ in range(class_count)]

  for i in range(len(W)):


- Piece[i] = int(math.floor(W[i].shape[0] / ClassCount))
+ Piece[i] = int(math.floor(W[i].shape[0] / class_count))

- Cs = 0
+ cs = 0
  # j = Classes, i = Weights, [0] = CutStart.

  for i in range(len(W)):
- for j in range(ClassCount):
- Cs = Cs + Piece[i]
- Divides[j][i][0] = Cs
+ for j in range(class_count):
+ cs = cs + Piece[i]
+ Divides[j][i][0] = cs
  #print('Divides: ' + j + i + ' = ' + Divides[j][i][0])
  #input()

  j = 0
- Cs = 0
+ cs = 0

  return Divides

@@ -418,8 +418,8 @@ def SynapticDividing(
  def Fex(
  Input, # list[num]: Input data.
  w, # list[list[num]]: Weight matrix of the neural network.
- MembranThreshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
- MembranPotential # num: Threshold value for comparison.
+ membran_threshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
+ membran_potential # num: Threshold value for comparison.
  ) -> tuple:
  """
  Applies feature extraction process to the input data using synaptic pruning.
@@ -427,32 +427,32 @@ def Fex(
  Args:
  Input (list[num]): Input data.
  w (list[list[num]]): Weight matrix of the neural network.
- MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
- MembranPotential (num): Threshold value for comparison.
+ membran_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+ membran_potential (num): Threshold value for comparison.

  Returns:
  tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
  """

- if MembranThreshold == '<':
- PruneIndex = np.where(Input < MembranPotential)
- elif MembranThreshold == '>':
- PruneIndex = np.where(Input > MembranPotential)
- elif MembranThreshold == '==':
- PruneIndex = np.where(Input == MembranPotential)
- elif MembranThreshold == '!=':
- PruneIndex = np.where(Input != MembranPotential)
+ if membran_threshold == '<':
+ PruneIndex = np.where(Input < membran_potential)
+ elif membran_threshold == '>':
+ PruneIndex = np.where(Input > membran_potential)
+ elif membran_threshold == '==':
+ PruneIndex = np.where(Input == membran_potential)
+ elif membran_threshold == '!=':
+ PruneIndex = np.where(Input != membran_potential)

  w = SynapticPruning(w, PruneIndex, 'col', 0, 0)

- NeuralLayer = np.dot(w, Input)
- return NeuralLayer,w
+ neural_layer = np.dot(w, Input)
+ return neural_layer,w

  def Cat(
  Input, # list[num]: Input data.
  w, # list[list[num]]: Weight matrix of the neural network.
- MembranThreshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
- MembranPotential, # num: Threshold value for comparison.
+ membran_threshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
+ membran_potential, # num: Threshold value for comparison.
  isTrain # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
  ) -> tuple:
  """
@@ -461,29 +461,29 @@ def Cat(
  Args:
  Input (list[num]): Input data.
  w (list[list[num]]): Weight matrix of the neural network.
- MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
- MembranPotential (num): Threshold value for comparison.
+ membran_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+ membran_potential (num): Threshold value for comparison.
  isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).

  Returns:
  tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
  """

- if MembranThreshold == '<':
- PruneIndex = np.where(Input < MembranPotential)
- elif MembranThreshold == '>':
- PruneIndex = np.where(Input > MembranPotential)
- elif MembranThreshold == '==':
- PruneIndex = np.where(Input == MembranPotential)
- elif MembranThreshold == '!=':
- PruneIndex = np.where(Input != MembranPotential)
- if isTrain == 1 and MembranThreshold != 'none':
+ if membran_threshold == '<':
+ PruneIndex = np.where(Input < membran_potential)
+ elif membran_threshold == '>':
+ PruneIndex = np.where(Input > membran_potential)
+ elif membran_threshold == '==':
+ PruneIndex = np.where(Input == membran_potential)
+ elif membran_threshold == '!=':
+ PruneIndex = np.where(Input != membran_potential)
+ if isTrain == 1 and membran_threshold != 'none':

  w = SynapticPruning(w, PruneIndex, 'col', 0, 0)


- NeuralLayer = np.dot(w, Input)
- return NeuralLayer,w
+ neural_layer = np.dot(w, Input)
+ return neural_layer,w


  def Normalization(
@@ -558,27 +558,27 @@ def Relu(


  def TestPLAN(
- TestInputs, # list[list[num]]: Test input data.
- TestLabels, # list[num]: Test labels.
- Layers, # list[str]: List of layer names.
- MembranThresholds, # list[str]: List of MEMBRAN THRESHOLDS for each layer.
- MembranPotentials, # list[num]: List of MEMBRAN POTENTIALS for each layer.
- Normalizations, # str: Whether normalization will be performed ("y" or "n").
- Activations, # str: Activation function list for the neural network.
- Visualize, # Visualize Testing procces or not visualize ('y' or 'n')
+ x_test, # list[list[num]]: Test input data.
+ y_test, # list[num]: Test labels.
+ layers, # list[str]: List of layer names.
+ membran_thresholds, # list[str]: List of MEMBRAN THRESHOLDS for each layer.
+ membran_potentials, # list[num]: List of MEMBRAN POTENTIALS for each layer.
+ normalizations, # str: Whether normalization will be performed ("y" or "n").
+ activations, # str: Activation function list for the neural network.
+ visualize, # visualize Testing procces or not visualize ('y' or 'n')
  W # list[list[num]]: Weight matrix of the neural network.
  ) -> tuple:
  infoTestModel = """
  Tests the neural network model with the given test data.

  Args:
- TestInputs (list[list[num]]): Test input data.
- TestLabels (list[num]): Test labels.
- Layers (list[str]): List of layer names.
- MembranThresholds (list[str]): List of MEMBRAN THRESHOLDS for each layer.
- MembranPotentials (list[num]): List of MEMBRAN POTENTIALS for each layer.
- Normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
- Activation (str): Activation function for the neural network.
+ x_test (list[list[num]]): Test input data.
+ y_test (list[num]): Test labels.
+ layers (list[str]): List of layer names.
+ membran_thresholds (list[str]): List of MEMBRAN THRESHOLDS for each layer.
+ membran_potentials (list[num]): List of MEMBRAN POTENTIALS for each layer.
+ normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
+ activations (list[str]): Activation function for the neural network.
  W (list[list[num]]): Weight matrix of the neural network.

  Returns:
@@ -589,49 +589,49 @@ def TestPLAN(
  try:
  Wc = [0] * len(W)
  true = 0
- TestPredictions = [None] * len(TestLabels)
+ TestPredictions = [None] * len(y_test)
  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
  print('\rCopying weights.....',i+1,'/',len(W),end = "")

  print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
- StartTime = time.time()
- for inpIndex,Input in enumerate(TestInputs):
+ start_time = time.time()
+ for inpIndex,Input in enumerate(x_test):
  Input = np.array(Input)
  Input = Input.ravel()
- UniStartTime = time.time()
- NeuralLayer = Input
+ uni_start_time = time.time()
+ neural_layer = Input

- for index, Layer in enumerate(Layers):
- if Normalizations[index] == 'y':
- NeuralLayer = Normalization(NeuralLayer)
- if Activations[index] == 'relu':
- NeuralLayer = Relu(NeuralLayer)
- elif Activations[index] == 'sigmoid':
- NeuralLayer = Sigmoid(NeuralLayer)
- elif Activations[index] == 'softmax':
- NeuralLayer = Softmax(NeuralLayer)
+ for index, Layer in enumerate(layers):
+ if normalizations[index] == 'y':
+ neural_layer = Normalization(neural_layer)
+ if activations[index] == 'relu':
+ neural_layer = Relu(neural_layer)
+ elif activations[index] == 'sigmoid':
+ neural_layer = Sigmoid(neural_layer)
+ elif activations[index] == 'softmax':
+ neural_layer = Softmax(neural_layer)

- if Layers[index] == 'fex':
- NeuralLayer,useless = Fex(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index])
- if Layers[index] == 'cat':
- NeuralLayer,useless = Cat(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index],0)
+ if layers[index] == 'fex':
+ neural_layer,useless = Fex(neural_layer, W[index], membran_thresholds[index], membran_potentials[index])
+ if layers[index] == 'cat':
+ neural_layer,useless = Cat(neural_layer, W[index], membran_thresholds[index], membran_potentials[index],0)
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
- RealOutput = np.argmax(TestLabels[inpIndex])
- PredictedOutput = np.argmax(NeuralLayer)
+ RealOutput = np.argmax(y_test[inpIndex])
+ PredictedOutput = np.argmax(neural_layer)
  if RealOutput == PredictedOutput:
  true += 1
- Acc = true / len(TestLabels)
+ acc = true / len(y_test)
  TestPredictions[inpIndex] = PredictedOutput

- if Visualize == 'y':
+ if visualize == 'y':

- TestLabelsVisual = np.copy(TestLabels)
- TestLabelsVisual = np.argmax(TestLabelsVisual, axis=1)
+ y_testVisual = np.copy(y_test)
+ y_testVisual = np.argmax(y_testVisual, axis=1)

  plt.figure(figsize=(12, 6))
- sns.kdeplot(TestLabelsVisual, label='Real Outputs', fill=True)
+ sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
  sns.kdeplot(TestPredictions, label='Predictions', fill=True)
  plt.legend()
  plt.xlabel('Class')
@@ -639,72 +639,72 @@ def TestPLAN(
  plt.title('Predictions and Real Outputs for Testing KDE Plot')
  plt.show()

- if inpIndex + 1 != len(TestInputs):
+ if inpIndex + 1 != len(x_test):

  plt.close('all')

- UniEndTime = time.time()
+ uni_end_time = time.time()

- CalculatingEst = round((UniEndTime - UniStartTime) * (len(TestInputs) - inpIndex),3)
+ calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)

- if CalculatingEst < 60:
- print('\rest......(sec):',CalculatingEst,'\n',end= "")
- print('\rTest Accuracy: ' ,Acc ,"\n", end="")
+ if calculating_est < 60:
+ print('\rest......(sec):',calculating_est,'\n',end= "")
+ print('\rTest accuracy: ' ,acc ,"\n", end="")

- elif CalculatingEst > 60 and CalculatingEst < 3600:
- print('\rest......(min):',CalculatingEst/60,'\n',end= "")
- print('\rTest Accuracy: ' ,Acc ,"\n", end="")
+ elif calculating_est > 60 and calculating_est < 3600:
+ print('\rest......(min):',calculating_est/60,'\n',end= "")
+ print('\rTest accuracy: ' ,acc ,"\n", end="")

- elif CalculatingEst > 3600:
- print('\rest......(h):',CalculatingEst/3600,'\n',end= "")
- print('\rTest Accuracy: ' ,Acc ,"\n", end="")
+ elif calculating_est > 3600:
+ print('\rest......(h):',calculating_est/3600,'\n',end= "")
+ print('\rTest accuracy: ' ,acc ,"\n", end="")

  EndTime = time.time()
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)

- CalculatingEst = round(EndTime - StartTime,2)
+ calculating_est = round(EndTime - start_time,2)

  print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")

- if CalculatingEst < 60:
- print('Total testing time(sec): ',CalculatingEst)
+ if calculating_est < 60:
+ print('Total testing time(sec): ',calculating_est)

- elif CalculatingEst > 60 and CalculatingEst < 3600:
- print('Total testing time(min): ',CalculatingEst/60)
+ elif calculating_est > 60 and calculating_est < 3600:
+ print('Total testing time(min): ',calculating_est/60)

- elif CalculatingEst > 3600:
- print('Total testing time(h): ',CalculatingEst/3600)
+ elif calculating_est > 3600:
+ print('Total testing time(h): ',calculating_est/3600)

- if Acc >= 0.8:
- print(Fore.GREEN + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
+ if acc >= 0.8:
+ print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)

- elif Acc < 0.8 and Acc > 0.6:
- print(Fore.MAGENTA + '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
+ elif acc < 0.8 and acc > 0.6:
+ print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)

- elif Acc <= 0.6:
- print(Fore.RED+ '\nTotal Test Accuracy: ' ,Acc, '\n' + Style.RESET_ALL)
+ elif acc <= 0.6:
+ print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)

  except:

- print(Fore.RED + "ERROR: Testing model parameters like 'Layers' 'MembranCounts' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
+ print(Fore.RED + "ERROR: Testing model parameters like 'layers' 'MembranCounts' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
  return 'e'

- return W,TestPredictions,Acc
-
- def SavePLAN(ModelName,
- ModelType,
- Layers,
- ClassCount,
- MembranThresholds,
- MembranPotentials,
- Normalizations,
- Activations,
- TestAcc,
- LogType,
- WeightsType,
- WeightsFormat,
- ModelPath,
+ return W,TestPredictions,acc
+
+ def SavePLAN(model_name,
+ model_type,
+ layers,
+ class_count,
+ membran_thresholds,
+ membran_potentials,
+ normalizations,
+ activations,
+ test_acc,
+ log_type,
+ weights_type,
+ weights_format,
+ model_path,
  W
  ):

@@ -712,19 +712,19 @@ def SavePLAN(ModelName,
  Function to save a deep learning model.

  Arguments:
- ModelName (str): Name of the model.
- ModelType (str): Type of the model.(options: PLAN)
- Layers (list): List containing 'fex' and 'cat' layers.
- ClassCount (int): Number of classes.
- MembranThresholds (list): List containing MEMBRAN THRESHOLDS.
- MembranPotentials (list): List containing MEMBRAN POTENTIALS.
+ model_name (str): Name of the model.
+ model_type (str): Type of the model.(options: PLAN)
+ layers (list): List containing 'fex' and 'cat' layers.
+ class_count (int): Number of classes.
+ membran_thresholds (list): List containing MEMBRAN THRESHOLDS.
+ membran_potentials (list): List containing MEMBRAN POTENTIALS.
  DoNormalization (str): is that normalized data ? 'y' or 'n'.
- Activations (list): List containing activation functions for each layer.
- TestAcc (float): Test accuracy of the model.
- LogType (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
- WeightsType (str): Type of weights to save (options: 'txt', 'npy', 'mat').
+ activations (list): List containing activation functions for each layer.
+ test_acc (float): Test accuracy of the model.
+ log_type (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
+ weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
  WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
- ModelPath (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+ model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
  W: Weights of the model.

  Returns:
@@ -734,15 +734,15 @@ def SavePLAN(ModelName,
  # Operations to be performed by the function will be written here
  pass

- if LogType != 'csv' and LogType != 'txt' and LogType != 'hdf5':
+ if log_type != 'csv' and log_type != 'txt' and log_type != 'hdf5':
  print(Fore.RED + "ERROR109: Save Log Type (File Extension) must be 'csv' or 'txt' or 'hdf5' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'

- if WeightsType != 'txt' and WeightsType != 'npy' and WeightsType != 'mat':
+ if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
  print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'

- if WeightsFormat != 'd' and WeightsFormat != 'f' and WeightsFormat != 'raw':
+ if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
  print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'

@@ -760,100 +760,100 @@ def SavePLAN(ModelName,
  from datetime import datetime
  from scipy import io

- data = {'MODEL NAME': ModelName,
- 'MODEL TYPE': ModelType,
- 'LAYERS': Layers,
- 'LAYER COUNT': len(Layers),
- 'CLASS COUNT': ClassCount,
- 'MEMBRAN THRESHOLDS': MembranThresholds,
- 'MEMBRAN POTENTIALS': MembranPotentials,
- 'NORMALIZATION': Normalizations,
- 'ACTIVATIONS': Activations,
+ data = {'MODEL NAME': model_name,
+ 'MODEL TYPE': model_type,
+ 'LAYERS': layers,
+ 'LAYER COUNT': len(layers),
+ 'CLASS COUNT': class_count,
+ 'MEMBRAN THRESHOLDS': membran_thresholds,
+ 'MEMBRAN POTENTIALS': membran_potentials,
+ 'NORMALIZATION': normalizations,
+ 'ACTIVATIONS': activations,
  'NEURON COUNT': NeuronCount,
  'SYNAPSE COUNT': SynapseCount,
- 'TEST ACCURACY': TestAcc,
+ 'TEST ACCURACY': test_acc,
  'SAVE DATE': datetime.now(),
- 'WEIGHTS TYPE': WeightsType,
- 'WEIGHTS FORMAT': WeightsFormat,
- 'MODEL PATH': ModelPath
+ 'WEIGHTS TYPE': weights_type,
+ 'WEIGHTS FORMAT': weights_format,
+ 'MODEL PATH': model_path
  }
  try:

  df = pd.DataFrame(data)

- if LogType == 'csv':
+ if log_type == 'csv':

- df.to_csv(ModelPath + ModelName + '.csv', sep='\t', index=False)
+ df.to_csv(model_path + model_name + '.csv', sep='\t', index=False)

- elif LogType == 'txt':
+ elif log_type == 'txt':

- df.to_csv(ModelPath + ModelName + '.txt', sep='\t', index=False)
+ df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)

- elif LogType == 'hdf5':
+ elif log_type == 'hdf5':

- df.to_hdf(ModelPath + ModelName + '.h5', key='data', mode='w')
+ df.to_hdf(model_path + model_name + '.h5', key='data', mode='w')

  except:

- print(Fore.RED + "ERROR: Model log not saved probably ModelPath incorrect. Check the log parameters from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
+ print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'
  try:

- if WeightsType == 'txt' and WeightsFormat == 'd':
+ if weights_type == 'txt' and weights_format == 'd':

  for i, w in enumerate(W):
- np.savetxt(ModelPath + ModelName + str(i+1) + 'w.txt' , w, fmt='%d')
+ np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%d')

- if WeightsType == 'txt' and WeightsFormat == 'f':
+ if weights_type == 'txt' and weights_format == 'f':

  for i, w in enumerate(W):
- np.savetxt(ModelPath + ModelName + str(i+1) + 'w.txt' , w, fmt='%f')
+ np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%f')

- if WeightsType == 'txt' and WeightsFormat == 'raw':
+ if weights_type == 'txt' and weights_format == 'raw':

  for i, w in enumerate(W):
- np.savetxt(ModelPath + ModelName + str(i+1) + 'w.txt' , w)
+ np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w)


  ###


- if WeightsType == 'npy' and WeightsFormat == 'd':
+ if weights_type == 'npy' and weights_format == 'd':

  for i, w in enumerate(W):
- np.save(ModelPath + ModelName + str(i+1) + 'w.npy', w.astype(int))
+ np.save(model_path + model_name + str(i+1) + 'w.npy', w.astype(int))

- if WeightsType == 'npy' and WeightsFormat == 'f':
+ if weights_type == 'npy' and weights_format == 'f':

  for i, w in enumerate(W):
- np.save(ModelPath + ModelName + str(i+1) + 'w.npy' , w, w.astype(float))
+ np.save(model_path + model_name + str(i+1) + 'w.npy' , w, w.astype(float))

- if WeightsType == 'npy' and WeightsFormat == 'raw':
+ if weights_type == 'npy' and weights_format == 'raw':

  for i, w in enumerate(W):
- np.save(ModelPath + ModelName + str(i+1) + 'w.npy' , w)
+ np.save(model_path + model_name + str(i+1) + 'w.npy' , w)


  ###


- if WeightsType == 'mat' and WeightsFormat == 'd':
+ if weights_type == 'mat' and weights_format == 'd':

  for i, w in enumerate(W):
  w = {'w': w.astype(int)}
- io.savemat(ModelPath + ModelName + str(i+1) + 'w.mat', w)
+ io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)

- if WeightsType == 'mat' and WeightsFormat == 'f':
+ if weights_type == 'mat' and weights_format == 'f':

  for i, w in enumerate(W):
  w = {'w': w.astype(float)}
- io.savemat(ModelPath + ModelName + str(i+1) + 'w.mat', w)
+ io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)

- if WeightsType == 'mat' and WeightsFormat == 'raw':
+ if weights_type == 'mat' and weights_format == 'raw':

  for i, w in enumerate(W):
  w = {'w': w}
- io.savemat(ModelPath + ModelName + str(i+1) + 'w.mat', w)
+ io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)

  except:

@@ -869,20 +869,20 @@ def SavePLAN(ModelName,
  return print(message)


- def LoadPLAN(ModelName,
- ModelPath,
- LogType,
+ def LoadPLAN(model_name,
+ model_path,
+ log_type,
  ):
  infoLoadPLAN = """
  Function to load a deep learning model.

  Arguments:
- ModelName (str): Name of the model.
- ModelPath (str): Path where the model is saved.
- LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
+ model_name (str): Name of the model.
+ model_path (str): Path where the model is saved.
+ log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').

  Returns:
- lists: W(list[num]), Layers, MembranThresholds, MembranPotentials, Normalization,Activations
+ lists: W(list[num]), layers, membran_thresholds, membran_potentials, Normalization,activations
  """
  pass

@@ -892,112 +892,112 @@ def LoadPLAN(ModelName,

  try:

- if LogType == 'csv':
- df = pd.read_csv(ModelPath + ModelName + '.' + LogType)
+ if log_type == 'csv':
+ df = pd.read_csv(model_path + model_name + '.' + log_type)


- if LogType == 'txt':
- df = pd.read_csv(ModelPath + ModelName + '.' + LogType, delimiter='\t')
+ if log_type == 'txt':
+ df = pd.read_csv(model_path + model_name + '.' + log_type, delimiter='\t')


- if LogType == 'hdf5':
- df = pd.read_hdf(ModelPath + ModelName + '.' + LogType)
+ if log_type == 'hdf5':
+ df = pd.read_hdf(model_path + model_name + '.' + log_type)
  except:
- print(Fore.RED + "ERROR: Model Path error. Accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)
-
- ModelName = str(df['MODEL NAME'].iloc[0])
- Layers = df['LAYERS'].tolist()
- LayerCount = int(df['LAYER COUNT'].iloc[0])
- ClassCount = int(df['CLASS COUNT'].iloc[0])
- MembranThresholds = df['MEMBRAN THRESHOLDS'].tolist()
- MembranPotentials = df['MEMBRAN POTENTIALS'].tolist()
- Normalizations = df['NORMALIZATION'].tolist()
- Activations = df['ACTIVATIONS'].tolist()
+ print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)
+
+ model_name = str(df['MODEL NAME'].iloc[0])
+ layers = df['LAYERS'].tolist()
+ layer_count = int(df['LAYER COUNT'].iloc[0])
+ class_count = int(df['CLASS COUNT'].iloc[0])
+ membran_thresholds = df['MEMBRAN THRESHOLDS'].tolist()
+ membran_potentials = df['MEMBRAN POTENTIALS'].tolist()
+ normalizations = df['NORMALIZATION'].tolist()
+ activations = df['ACTIVATIONS'].tolist()
  NeuronCount = int(df['NEURON COUNT'].iloc[0])
  SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
- TestAcc = int(df['TEST ACCURACY'].iloc[0])
- ModelType = str(df['MODEL TYPE'].iloc[0])
+ test_acc = int(df['TEST ACCURACY'].iloc[0])
+ model_type = str(df['MODEL TYPE'].iloc[0])
  WeightType = str(df['WEIGHTS TYPE'].iloc[0])
  WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
- ModelPath = str(df['MODEL PATH'].iloc[0])
+ model_path = str(df['MODEL PATH'].iloc[0])

- W = [0] * LayerCount
+ W = [0] * layer_count

  if WeightType == 'txt':
- for i in range(LayerCount):
- W[i] = np.loadtxt(ModelPath + ModelName + str(i+1) + 'w.txt')
+ for i in range(layer_count):
+ W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
  elif WeightType == 'npy':
- for i in range(LayerCount):
- W[i] = np.load(ModelPath + ModelName + str(i+1) + 'w.npy')
+ for i in range(layer_count):
+ W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
  elif WeightType == 'mat':
- for i in range(LayerCount):
- W[i] = sio.loadmat(ModelPath + ModelName + str(i+1) + 'w.mat')
+ for i in range(layer_count):
+ W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
  else:
  raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
  print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
- return W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations,df
+ return W,layers,membran_thresholds,membran_potentials,normalizations,activations,df

- def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
+ def PredictFromDiscPLAN(Input,model_name,model_path,log_type):
  infoPredictFromDİscPLAN = """
  Function to make a prediction using a divided pruning deep learning neural network (PLAN).

  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
- ModelName (str): Name of the model.
- ModelPath (str): Path where the model is saved.
- LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
+ model_name (str): Name of the model.
+ model_path (str): Path where the model is saved.
+ log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').

  Returns:
  ndarray: Output from the model.
  """
- W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations = LoadPLAN(ModelName,ModelPath,
- LogType)[0:6]
+ W,layers,membran_thresholds,membran_potentials,normalizations,activations = LoadPLAN(model_name,model_path,
+ log_type)[0:6]
  Wc = [0] * len(W)
  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
  try:
- NeuralLayer = Input
- NeuralLayer = np.array(NeuralLayer)
- NeuralLayer = NeuralLayer.ravel()
- for index, Layer in enumerate(Layers):
+ neural_layer = Input
+ neural_layer = np.array(neural_layer)
+ neural_layer = neural_layer.ravel()
+ for index, Layer in enumerate(layers):
  if Normalization == 'y':
- NeuralLayer = Normalization(NeuralLayer)
- if Activations[index] == 'relu':
- NeuralLayer = Relu(NeuralLayer)
- elif Activations[index] == 'sigmoid':
- NeuralLayer = Sigmoid(NeuralLayer)
- elif Activations[index] == 'softmax':
- NeuralLayer = Softmax(NeuralLayer)
+ neural_layer = Normalization(neural_layer)
+ if activations[index] == 'relu':
+ neural_layer = Relu(neural_layer)
+ elif activations[index] == 'sigmoid':
+ neural_layer = Sigmoid(neural_layer)
+ elif activations[index] == 'softmax':
+ neural_layer = Softmax(neural_layer)

- if Layers[index] == 'fex':
- NeuralLayer,useless = Fex(NeuralLayer, W[index],
- MembranThresholds[index],
- MembranPotentials[index])
- if Layers[index] == 'cat':
- NeuralLayer,useless = Cat(NeuralLayer, W[index],
- MembranThresholds[index],
- MembranPotentials[index],
+ if layers[index] == 'fex':
+ neural_layer,useless = Fex(neural_layer, W[index],
+ membran_thresholds[index],
+ membran_potentials[index])
+ if layers[index] == 'cat':
+ neural_layer,useless = Cat(neural_layer, W[index],
+ membran_thresholds[index],
+ membran_potentials[index],
  0)
  except:
  print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: PredictFromDiscPLAN" + infoPredictFromDİscPLAN + Style.RESET_ALL)
  return 'e'
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
- return NeuralLayer
+ return neural_layer


- def PredictFromRamPLAN(Input,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations,W):
+ def PredictFromRamPLAN(Input,layers,membran_thresholds,membran_potentials,normalizations,activations,W):
  infoPredictFromRamPLAN = """
  Function to make a prediction using a pruning learning artificial neural network (PLAN)
  from weights and parameters stored in memory.

  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
- Layers (list): Number and types of layers.
- MembranThresholds (list): MEMBRAN THRESHOLDS.
- MembranPotentials (list): MEMBRAN POTENTIALS.
+ layers (list): Number and types of layers.
+ membran_thresholds (list): MEMBRAN THRESHOLDS.
+ membran_potentials (list): MEMBRAN POTENTIALS.
  DoNormalization (str): Whether to normalize ('y' or 'n').
- Activations (list): Activation functions for each layer.
+ activations (list): Activation functions for each layer.
  W (list of ndarrays): Weights of the model.

  Returns:
@@ -1008,75 +1008,119 @@ def PredictFromRamPLAN(Input,Layers,MembranThresholds,MembranPotentials,Normaliz
  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
  try:
- NeuralLayer = Input
- NeuralLayer = np.array(NeuralLayer)
- NeuralLayer = NeuralLayer.ravel()
- for index, Layer in enumerate(Layers):
- if Normalizations[index] == 'y':
- NeuralLayer = Normalization(NeuralLayer)
- if Activations[index] == 'relu':
- NeuralLayer = Relu(NeuralLayer)
- elif Activations[index] == 'sigmoid':
- NeuralLayer = Sigmoid(NeuralLayer)
- elif Activations[index] == 'softmax':
- NeuralLayer = Softmax(NeuralLayer)
+ neural_layer = Input
+ neural_layer = np.array(neural_layer)
+ neural_layer = neural_layer.ravel()
+ for index, Layer in enumerate(layers):
+ if normalizations[index] == 'y':
+ neural_layer = Normalization(neural_layer)
+ if activations[index] == 'relu':
+ neural_layer = Relu(neural_layer)
+ elif activations[index] == 'sigmoid':
+ neural_layer = Sigmoid(neural_layer)
+ elif activations[index] == 'softmax':
+ neural_layer = Softmax(neural_layer)

- if Layers[index] == 'fex':
- NeuralLayer,useless = Fex(NeuralLayer, W[index],
- MembranThresholds[index],
- MembranPotentials[index])
- if Layers[index] == 'cat':
- NeuralLayer,useless = Cat(NeuralLayer, W[index],
- MembranThresholds[index],
- MembranPotentials[index],0)
+ if layers[index] == 'fex':
+ neural_layer,useless = Fex(neural_layer, W[index],
+ membran_thresholds[index],
+ membran_potentials[index])
+ if layers[index] == 'cat':
+ neural_layer,useless = Cat(neural_layer, W[index],
+ membran_thresholds[index],
+ membran_potentials[index],0)
  except:
  print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: PredictFromRamPLAN." + infoPredictFromRamPLAN + Style.RESET_ALL)
  return 'e'
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
- return NeuralLayer
+ return neural_layer


- def AutoBalancer(TrainInputs, TrainLabels, ClassCount):
+ def AutoBalancer(x_train, y_train, class_count):
  infoAutoBalancer = """
  Function to balance the training data across different classes.

  Arguments:
- TrainInputs (list): Input data for training.
- TrainLabels (list): Labels corresponding to the input data.
- ClassCount (int): Number of classes.
+ x_train (list): Input data for training.
+ y_train (list): Labels corresponding to the input data.
+ class_count (int): Number of classes.

  Returns:
  tuple: A tuple containing balanced input data and labels.
  """
  try:
- ClassIndices = {i: np.where(np.array(TrainLabels)[:, i] == 1)[0] for i in range(ClassCount)}
- ClassCounts = [len(ClassIndices[i]) for i in range(ClassCount)]
+ ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[0] for i in range(class_count)}
+ class_counts = [len(ClassIndices[i]) for i in range(class_count)]

- if len(set(ClassCounts)) == 1:
+ if len(set(class_counts)) == 1:
  print(Fore.WHITE + "INFO: All training data have already balanced. from: AutoBalancer" + Style.RESET_ALL)
- return TrainInputs, TrainLabels
+ return x_train, y_train

- MinCount = min(ClassCounts)
+ MinCount = min(class_counts)

  BalancedIndices = []
- for i in range(ClassCount):
+ for i in range(class_count):
  if len(ClassIndices[i]) > MinCount:
  SelectedIndices = np.random.choice(ClassIndices[i], MinCount, replace=False)
  else:
  SelectedIndices = ClassIndices[i]
  BalancedIndices.extend(SelectedIndices)

- BalancedInputs = [TrainInputs[idx] for idx in BalancedIndices]
- BalancedLabels = [TrainLabels[idx] for idx in BalancedIndices]
+ BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+ BalancedLabels = [y_train[idx] for idx in BalancedIndices]

- print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(TrainInputs)) + " to: " + str(len(BalancedInputs)) + ". from: AutoBalancer " + Style.RESET_ALL)
+ print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: AutoBalancer " + Style.RESET_ALL)
  except:
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoAutoBalancer)
  return 'e'

  return BalancedInputs, BalancedLabels

+ def SyntheticAugmentation(x, y, class_count):
+ """
+ Generates synthetic examples to balance classes with fewer examples.
+
+ Arguments:
+ x -- Input dataset (examples) - list format
+ y -- Class labels (one-hot encoded) - list format
+ class_count -- Number of classes
+
+ Returns:
+ x_balanced -- Balanced input dataset (list format)
+ y_balanced -- Balanced class labels (one-hot encoded, list format)
+ """
+ # Calculate class distribution
+ class_distribution = {i: 0 for i in range(class_count)}
+ for label in y:
+ class_distribution[np.argmax(label)] += 1
+
+ max_class_count = max(class_distribution.values())
+
+ x_balanced = list(x)
+ y_balanced = list(y)
+
+ for class_label in range(class_count):
+ class_indices = [i for i, label in enumerate(y) if np.argmax(label) == class_label]
+ num_samples = len(class_indices)
+
+ if num_samples < max_class_count:
+ while num_samples < max_class_count:
+ # Select two random examples
+ random_indices = np.random.choice(class_indices, 2, replace=False)
+ sample1 = x[random_indices[0]]
+ sample2 = x[random_indices[1]]
+
+ # Generate a new synthetic example between the two selected examples
+ synthetic_sample = sample1 + (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
+ x_balanced.append(synthetic_sample.tolist())
+ y_balanced.append(y[class_indices[0]]) # The new example has the same class label
+
+ num_samples += 1
+
+ return np.array(x_balanced), np.array(y_balanced)
+

  def GetWeights():

@@ -1090,6 +1134,6 @@ def GetPreds():

  return 1

- def GetAcc():
+ def Getacc():

  return 2
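
The only function added in 1.3.2 is SyntheticAugmentation, which oversamples minority classes by interpolating between randomly chosen same-class pairs until every class reaches the size of the largest one. A minimal usage sketch (import path assumed from the wheel layout; note that a class being augmented needs at least two real samples for np.random.choice(class_indices, 2, replace=False) to succeed, and that the function returns numpy arrays even though its docstring promises lists):

import numpy as np
from plan.plan import SyntheticAugmentation  # import path assumed

x = [[0.1, 0.9], [0.2, 0.8], [0.15, 0.85], [0.9, 0.1], [0.8, 0.2]]  # three class-0, two class-1 samples
y = [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1]]                        # one-hot labels
x_bal, y_bal = SyntheticAugmentation(x, y, class_count=2)
# Class 1 gains one sample interpolated between two of its real samples,
# so both classes end up with three samples each.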