pyerualjetwork 2.7.1__py3-none-any.whl → 2.7.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the changes between those package versions.
plan/plan.py CHANGED
@@ -1,1619 +1,10 @@
  # -*- coding: utf-8 -*-
  """
- Created on Tue Jun 18 23:32:16 2024
-
- @author: hasan
- """
-
- import pandas as pd
- import numpy as np
- import time
- from colorama import Fore, Style
- from typing import List, Union
- import math
- from scipy.special import expit, softmax
- import matplotlib.pyplot as plt
- import seaborn as sns
- from tqdm import tqdm
-
- # BUILD -----
-
-
- def fit(
- x_train: List[Union[int, float]],
- y_train: List[Union[int, float]], # At least two.. and one hot encoded
- val= None,
- val_count = None,
- activation_potentiation=None, # (float): Input activation_potentiation (optional)
- x_val= None,
- y_val= None,
- show_training = None,
- show_count= None
- ) -> str:
-
- infoPLAN = """
- Creates and configures a PLAN model.
-
- Args:
- x_train (list[num]): List of input data.
- y_train (list[num]): List of target labels. (one hot encoded)
- val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
- val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
- activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
- x_val (list[num]): List of validation data. (optional) Default: 10% of x_train (auto_balanced)
- y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 10% of y_train (auto_balanced)
- show_training (bool, str): True, None or'final'
- show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
- Returns:
- list([num]): (Weight matrices list, train_predictions list, Train_acc).
- error handled ?: Process status ('e')
- """
-
- if len(x_train) != len(y_train):
-
- print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
- return 'e'
-
- if val == True or val == 'final':
-
- try:
-
- if x_val == None and y_val == None:
-
- x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
-
- x_train, y_train = auto_balancer(x_train, y_train)
- x_val, y_val = auto_balancer(x_val, y_val)
-
- except:
- pass
-
- if val == True:
-
- if val_count == None:
-
- val_count = 0.1
-
- v_iter = 0
-
- if val == 'final':
-
- val_count = 0.99
-
- val_count = int(len(x_train) * val_count)
- val_count_copy = val_count
- val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
- val_list = [] * val_count
-
- if show_count == None:
-
- show_count = 10
-
- if show_training == True or show_training == 'final':
-
- row, col = shape_control(x_train)
-
- class_count = set()
-
- for sublist in y_train:
-
- class_count.add(tuple(sublist))
-
- class_count = list(class_count)
-
- y_train = [tuple(sublist) for sublist in y_train]
-
- neurons = [len(class_count), len(class_count)]
- layers = ['fex']
-
- x_train[0] = np.array(x_train[0])
- x_train[0] = x_train[0].ravel()
- x_train_size = len(x_train[0])
-
- STPW = weight_identification(
- len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
-
- LTPW = [1] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT
-
- y = decode_one_hot(y_train)
-
- train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
-
- for index, inp in enumerate(x_train):
-
- progress = index / len(x_train) * 100
-
- inp = np.array(inp)
- inp = inp.ravel()
-
- if x_train_size != len(inp):
- print(Fore.RED + "ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",
- infoPLAN + Style.RESET_ALL)
- return 'e'
-
- neural_layer = inp
-
- for Lindex, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
- if Layer == 'fex':
- STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)
-
- for i, w in enumerate(STPW):
- LTPW[i] = LTPW[i] + w
-
-
- if val == True and index == val_count:
-
-
- val_count += val_count_copy
-
- validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, None)
-
- val_acc = validation_model[get_acc()]
-
- val_list.append(val_acc)
-
- if v_iter == 0:
-
- val_bar.update(val_acc)
-
-
- if v_iter != 0:
-
- val_acc = val_acc - val_list[v_iter - 1]
- val_bar.update(val_acc)
-
- v_iter += 1
-
- if show_training == True:
-
- if index %show_count == 0:
-
-
- if index != 0:
- plt.close(fig)
-
- if row != 0:
-
- fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
-
- else:
-
- fig, ax = plt.subplots(1, 1, figsize=(18, 14))
-
- for j in range(len(class_count)):
-
-
- if row != 0:
-
- mat = LTPW[0][j,:].reshape(row, col)
- suptitle_info = 'Neurons Learning Progress: % '
- title_info = f'{j+1}. Neuron'
-
- mat = LTPW[0][j,:].reshape(row, col)
-
- ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
-
- ax[j].set_aspect('equal')
-
- ax[j].set_xticks([])
- ax[j].set_yticks([])
- ax[j].set_title(title_info)
-
- else:
-
- mat = LTPW[0]
- ax.imshow(mat, interpolation='sinc', cmap='viridis')
- suptitle_info = 'Weight Learning Progress: % '
- title_info = 'Weight Matrix Of Fex Layer'
-
-
-
-
- progress_status = f"{progress:.1f}"
- fig.suptitle(suptitle_info + progress_status)
- plt.draw()
- plt.pause(0.1)
-
-
- STPW = weight_identification(
- len(layers) - 1, len(class_count), neurons, x_train_size)
-
- train_progress.update(1)
-
- if show_training == 'final':
-
- fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
-
- for j in range(len(class_count)):
-
- mat = LTPW[0][j,:].reshape(row, col)
-
- ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
- ax[j].set_aspect('equal')
-
- ax[j].set_xticks([])
- ax[j].set_yticks([])
- ax[j].set_title(f'{j+1}. Neuron')
-
- progress_status = f"{progress:.1f}"
- fig.suptitle('Neurons Learning Progress: % ' + progress_status)
- plt.draw()
- plt.pause(0.1)
-
-
- if val == 'final':
-
- validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
-
- val_acc = validation_model[get_acc()]
-
- val_list.append(val_acc)
-
- val_bar.update(val_acc)
-
- return LTPW
-
- # FUNCTIONS -----
-
- def shape_control(x_train):
-
- try:
- row = x_train[1].shape[0]
- col = x_train[1].shape[1]
-
- except:
-
- print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
-
- try:
- row, col = find_numbers(len(x_train[0]))
-
- except:
-
- print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
- return [0, 0]
-
- return row, col
-
- def find_numbers(n):
- if n <= 1:
- raise ValueError("Parameter 'n' must be greater than 1.")
-
- for i in range(2, int(n**0.5) + 1):
- if n % i == 0:
- factor1 = i
- factor2 = n // i
- if factor1 == factor2:
- return factor1, factor2
-
- return None
-
- def weight_normalization(
- W,
- class_count
- ) -> str:
- """
- Row(Neuron) based normalization. For unbalanced models.
-
- Args:
- W (list(num)): Trained weight matrix list.
- class_count (int): Class count of model.
-
- Returns:
- list([numpy_arrays],[...]): posttrained weight matices of the model. .
- """
-
- for i in range(class_count):
-
- W[0][i,:] = normalization(W[0][i,:])
-
- return W
-
- def weight_identification(
- layer_count, # int: Number of layers in the neural network.
- class_count, # int: Number of classes in the classification task.
- neurons, # list[num]: List of neuron counts for each layer.
- x_train_size # int: Size of the input data.
- ) -> str:
- """
- Identifies the weights for a neural network model.
-
- Args:
- layer_count (int): Number of layers in the neural network.
- class_count (int): Number of classes in the classification task.
- neurons (list[num]): List of neuron counts for each layer.
- x_train_size (int): Size of the input data.
-
- Returns:
- list([numpy_arrays],[...]): pretrained weight matices of the model. .
- """
-
- Wlen = layer_count + 1
- W = [None] * Wlen
- W[0] = np.ones((neurons[0], x_train_size))
- ws = layer_count - 1
- for w in range(ws):
- W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
-
- return W
-
-
- def fex(
- Input, # list[num]: Input data.
- w, # num: Weight matrix of the neural network.
- is_training, # bool: Flag indicating if the function is called during training (True or False).
- Class, # int: Which class is, if training.
- activation_potentiation # float or None: Input activation potentiation (optional)
- ) -> tuple:
- """
- Applies feature extraction process to the input data using synaptic potentiation.
-
- Args:
- Input (num): Input data.
- w (num): Weight matrix of the neural network.
- is_training (bool): Flag indicating if the function is called during training (True or False).
- Class (int): if is during training then which class(label) ? is isnt then put None.
- activation_potentiation (float or None): Threshold value for comparison. (optional)
-
- Returns:
- tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
- """
-
- if is_training == True and activation_potentiation == None:
-
- w[Class, :] = Input
-
- return w
-
- elif is_training == True and activation_potentiation != None:
-
-
- Input[Input < activation_potentiation] = 0
- Input[Input > activation_potentiation] = 1
-
- w[Class,:] = Input
-
- return w
-
- elif is_training == False and activation_potentiation == None:
-
- neural_layer = np.dot(w, Input)
-
- return neural_layer
-
- elif is_training == False and activation_potentiation != None:
-
- Input[Input < activation_potentiation] = 0
- Input[Input > activation_potentiation] = 1
-
- neural_layer = np.dot(w, Input)
-
- return neural_layer
-
-
-
- def normalization(
- Input # num: Input data to be normalized.
- ):
- """
- Normalizes the input data using maximum absolute scaling.
-
- Args:
- Input (num): Input data to be normalized.
-
- Returns:
- (num) Scaled input data after normalization.
- """
-
- AbsVector = np.abs(Input)
-
- MaxAbs = np.max(AbsVector)
-
- ScaledInput = Input / MaxAbs
-
- return ScaledInput
-
-
- def Softmax(
- x # num: Input data to be transformed using softmax function.
- ):
- """
- Applies the softmax function to the input data.
-
- Args:
- (num): Input data to be transformed using softmax function.
-
- Returns:
- (num): Transformed data after applying softmax function.
- """
-
- return softmax(x)
-
-
- def Sigmoid(
- x # num: Input data to be transformed using sigmoid function.
- ):
- """
- Applies the sigmoid function to the input data.
-
- Args:
- (num): Input data to be transformed using sigmoid function.
-
- Returns:
- (num): Transformed data after applying sigmoid function.
- """
- return expit(x)
-
-
- def Relu(
- x # num: Input data to be transformed using ReLU function.
- ):
- """
- Applies the Rectified Linear Unit (ReLU) function to the input data.
-
- Args:
- (num): Input data to be transformed using ReLU function.
-
- Returns:
- (num): Transformed data after applying ReLU function.
- """
-
- return np.maximum(0, x)
-
-
- def evaluate(
- x_test, # list[num]: Test input data.
- y_test, # list[num]: Test labels.
- W, # list[num]: Weight matrix list of the neural network.
- activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
- acc_bar_status=True, # acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
- show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
- ) -> tuple:
- infoTestModel = """
- Tests the neural network model with the given test data.
-
- Args:
- x_test (list[num]): Test input data.
- y_test (list[num]): Test labels.
- W (list[num]): Weight matrix list of the neural network.
- activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
- acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
- show_metrices (bool): (True or None) (optional) Default: None
-
- Returns:
- tuple: A tuple containing the predicted labels and the accuracy of the model.
- """
-
- layers = ['fex']
-
- try:
- Wc = [0] * len(W) # Wc = Weight copy
- true = 0
- y_preds = [-1] * len(y_test)
- acc_list = []
-
- for i, w in enumerate(W):
- Wc[i] = np.copy(w)
-
-
- if acc_bar_status == True:
-
- test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
- acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
-
-
- for inpIndex, Input in enumerate(x_test):
- Input = np.array(Input)
- Input = Input.ravel()
- neural_layer = Input
-
- for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
- if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
-
-
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
- RealOutput = np.argmax(y_test[inpIndex])
- PredictedOutput = np.argmax(neural_layer)
- if RealOutput == PredictedOutput:
- true += 1
- acc = true / len(y_test)
-
-
- acc_list.append(acc)
- y_preds[inpIndex] = PredictedOutput
-
- if acc_bar_status == True:
- test_progress.update(1)
- if inpIndex == 0:
- acc_bar.update(acc)
-
- else:
- acc = acc - acc_list[inpIndex - 1]
- acc_bar.update(acc)
-
- if show_metrices == True:
- plot_evaluate(y_test, y_preds, acc_list)
-
-
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
-
- except:
-
- print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
- infoTestModel + Style.RESET_ALL)
- return 'e'
-
- return W, y_preds, acc
-
-
- def multiple_evaluate(
- x_test, # list[num]: Test input data.
- y_test, # list[num]: Test labels.
- show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
- MW, # list[list[num]]: Weight matrix of the neural network.
- activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
- ) -> tuple:
- infoTestModel = """
- Tests the neural network model with the given test data.
-
- Args:
- x_test (list[num]): Test input data.
- y_test (list[num]): Test labels.
- show_metrices (bool): (True or False)
- MW (list(list[num])): Multiple Weight matrix list of the neural network. (Multiple model testing)
-
- Returns:
- tuple: A tuple containing the predicted labels and the accuracy of the model.
- """
-
- layers = ['fex', 'cat']
-
- try:
- y_preds = [-1] * len(y_test)
- acc_list = []
- print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
- start_time = time.time()
- true = 0
- for inpIndex, Input in enumerate(x_test):
-
- output_layer = 0
-
- for m, Model in enumerate(MW):
-
- W = Model
-
- Wc = [0] * len(W) # Wc = weight copy
-
- y_preds = [None] * len(y_test)
- for i, w in enumerate(W):
- Wc[i] = np.copy(w)
-
- Input = np.array(Input)
- Input = Input.ravel()
- uni_start_time = time.time()
- neural_layer = Input
-
- for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
- if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
-
- output_layer += neural_layer
-
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
- RealOutput = np.argmax(y_test[inpIndex])
- PredictedOutput = np.argmax(output_layer)
- if RealOutput == PredictedOutput:
- true += 1
- acc = true / len(y_test)
- if show_metrices == True:
- acc_list.append(acc)
- y_preds[inpIndex] = PredictedOutput
-
-
- uni_end_time = time.time()
-
- calculating_est = round(
- (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
-
- if calculating_est < 60:
- print('\rest......(sec):', calculating_est, '\n', end="")
- print('\rTest accuracy: ', acc, "\n", end="")
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):', calculating_est/60, '\n', end="")
- print('\rTest accuracy: ', acc, "\n", end="")
-
- elif calculating_est > 3600:
- print('\rest......(h):', calculating_est/3600, '\n', end="")
- print('\rTest accuracy: ', acc, "\n", end="")
- if show_metrices == True:
- plot_evaluate(y_test, y_preds, acc_list)
-
- EndTime = time.time()
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
-
- calculating_est = round(EndTime - start_time, 2)
-
- print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
-
- if calculating_est < 60:
- print('Total testing time(sec): ', calculating_est)
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('Total testing time(min): ', calculating_est/60)
-
- elif calculating_est > 3600:
- print('Total testing time(h): ', calculating_est/3600)
-
- if acc >= 0.8:
- print(Fore.GREEN + '\nTotal Test accuracy: ',
- acc, '\n' + Style.RESET_ALL)
-
- elif acc < 0.8 and acc > 0.6:
- print(Fore.MAGENTA + '\nTotal Test accuracy: ',
- acc, '\n' + Style.RESET_ALL)
-
- elif acc <= 0.6:
- print(Fore.RED + '\nTotal Test accuracy: ',
- acc, '\n' + Style.RESET_ALL)
-
- except:
-
- print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
- return 'e'
-
- return W, y_preds, acc
-
-
- def save_model(model_name,
- model_type,
- class_count,
- test_acc,
- weights_type,
- weights_format,
- model_path,
- scaler_params,
- W,
- activation_potentiation=None
- ):
-
- infosave_model = """
- Function to save a potentiation learning model.
-
- Arguments:
- model_name (str): Name of the model.
- model_type (str): Type of the model.(options: PLAN)
- class_count (int): Number of classes.
- test_acc (float): Test accuracy of the model.
- weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
- WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
- model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
- scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
- W: Weights of the model.
- activation_potentiation (float or None): Threshold value for comparison. (optional)
-
- Returns:
- str: Message indicating if the model was saved successfully or encountered an error.
- """
-
- # Operations to be performed by the function will be written here
- pass
-
- layers = ['fex']
-
- if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
- print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
- infosave_model + Style.RESET_ALL)
- return 'e'
-
- if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
- print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" +
- infosave_model + Style.RESET_ALL)
- return 'e'
-
- NeuronCount = 0
- SynapseCount = 0
-
- try:
- for w in W:
- NeuronCount += np.shape(w)[0]
- SynapseCount += np.shape(w)[0] * np.shape(w)[1]
- except:
-
- print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
- infosave_model + Style.RESET_ALL)
- return 'e'
- import pandas as pd
- from datetime import datetime
- from scipy import io
-
- data = {'MODEL NAME': model_name,
- 'MODEL TYPE': model_type,
- 'LAYERS': layers,
- 'LAYER COUNT': len(layers),
- 'CLASS COUNT': class_count,
- 'NEURON COUNT': NeuronCount,
- 'SYNAPSE COUNT': SynapseCount,
- 'TEST ACCURACY': test_acc,
- 'SAVE DATE': datetime.now(),
- 'WEIGHTS TYPE': weights_type,
- 'WEIGHTS FORMAT': weights_format,
- 'MODEL PATH': model_path,
- 'STANDARD SCALER': scaler_params,
- 'ACTIVATION POTENTIATION': activation_potentiation
- }
- try:
-
- df = pd.DataFrame(data)
-
- df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
-
- except:
-
- print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
- infosave_model + Style.RESET_ALL)
- return 'e'
- try:
-
- if weights_type == 'txt' and weights_format == 'd':
-
- for i, w in enumerate(W):
- np.savetxt(model_path + model_name +
- str(i+1) + 'w.txt', w, fmt='%d')
-
- if weights_type == 'txt' and weights_format == 'f':
-
- for i, w in enumerate(W):
- np.savetxt(model_path + model_name +
- str(i+1) + 'w.txt', w, fmt='%f')
-
- if weights_type == 'txt' and weights_format == 'raw':
-
- for i, w in enumerate(W):
- np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
-
- ###
-
- if weights_type == 'npy' and weights_format == 'd':
-
- for i, w in enumerate(W):
- np.save(model_path + model_name +
- str(i+1) + 'w.npy', w.astype(int))
-
- if weights_type == 'npy' and weights_format == 'f':
-
- for i, w in enumerate(W):
- np.save(model_path + model_name + str(i+1) +
- 'w.npy', w, w.astype(float))
-
- if weights_type == 'npy' and weights_format == 'raw':
-
- for i, w in enumerate(W):
- np.save(model_path + model_name + str(i+1) + 'w.npy', w)
-
- ###
-
- if weights_type == 'mat' and weights_format == 'd':
-
- for i, w in enumerate(W):
- w = {'w': w.astype(int)}
- io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
- if weights_type == 'mat' and weights_format == 'f':
-
- for i, w in enumerate(W):
- w = {'w': w.astype(float)}
- io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
- if weights_type == 'mat' and weights_format == 'raw':
-
- for i, w in enumerate(W):
- w = {'w': w}
- io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
-
- except:
-
- print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
- return 'e'
- print(df)
- message = (
- Fore.GREEN + "Model Saved Successfully\n" +
- Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
- Style.RESET_ALL
- )
-
- return print(message)
-
-
- def load_model(model_name,
- model_path,
- ):
- infoload_model = """
- Function to load a potentiation learning model.
-
- Arguments:
- model_name (str): Name of the model.
- model_path (str): Path where the model is saved.
-
- Returns:
- lists: W(list[num]), activation_potentiation, DataFrame of the model
- """
- pass
-
- import scipy.io as sio
-
- try:
-
- df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
-
- except:
-
- print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" +
- infoload_model + Style.RESET_ALL)
-
- model_name = str(df['MODEL NAME'].iloc[0])
- layer_count = int(df['LAYER COUNT'].iloc[0])
- WeightType = str(df['WEIGHTS TYPE'].iloc[0])
-
- W = [0] * layer_count
-
- if WeightType == 'txt':
- for i in range(layer_count):
- W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
- elif WeightType == 'npy':
- for i in range(layer_count):
- W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
- elif WeightType == 'mat':
- for i in range(layer_count):
- W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
- else:
- raise ValueError(
- Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
- print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
- return W, df
-
-
- def predict_model_ssd(Input, model_name, model_path):
-
- infopredict_model_ssd = """
- Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
-
- Arguments:
- Input (list or ndarray): Input data for the model (single vector or single matrix).
- model_name (str): Name of the model.
- Returns:
- ndarray: Output from the model.
- """
- W, df = load_model(model_name, model_path)
-
- activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
-
- if activation_potentiation != 'nan':
-
- activation_potentiation = float(activation_potentiation)
-
- else:
-
- activation_potentiation = None
-
- try:
-
- scaler_params = df['STANDARD SCALER'].tolist()
-
-
- scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
-
- Input = standard_scaler(None, Input, scaler_params)
-
- except:
-
- pass
-
-
- layers = ['fex']
-
- Wc = [0] * len(W)
- for i, w in enumerate(W):
- Wc[i] = np.copy(w)
- try:
- neural_layer = Input
- neural_layer = np.array(neural_layer)
- neural_layer = neural_layer.ravel()
- for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
- if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
- elif Layer == 'cat':
- neural_layer = np.dot(W[index], neural_layer)
- except:
- print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
- infopredict_model_ssd + Style.RESET_ALL)
- return 'e'
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
- return neural_layer
-
-
- def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):
-
- infopredict_model_ram = """
- Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
- from weights and parameters stored in memory.
-
- Arguments:
- Input (list or ndarray): Input data for the model (single vector or single matrix).
- W (list of ndarrays): Weights of the model.
- scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
- activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
-
- Returns:
- ndarray: Output from the model.
- """
- try:
- if scaler_params != None:
-
- Input = standard_scaler(None, Input, scaler_params)
- except:
- Input = standard_scaler(None, Input, scaler_params)
-
- layers = ['fex']
-
- Wc = [0] * len(W)
- for i, w in enumerate(W):
- Wc[i] = np.copy(w)
- try:
- neural_layer = Input
- neural_layer = np.array(neural_layer)
- neural_layer = neural_layer.ravel()
- for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
- if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
- elif Layer == 'cat':
- neural_layer = np.dot(W[index], neural_layer)
-
- except:
- print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
- infopredict_model_ram + Style.RESET_ALL)
- return 'e'
- for i, w in enumerate(Wc):
- W[i] = np.copy(w)
- return neural_layer
-
-
- def auto_balancer(x_train, y_train):
-
- infoauto_balancer = """
- Function to balance the training data across different classes.
-
- Arguments:
- x_train (list): Input data for training.
- y_train (list): Labels corresponding to the input data.
-
- Returns:
- tuple: A tuple containing balanced input data and labels.
- """
- classes = np.arange(y_train.shape[1])
- class_count = len(classes)
-
- try:
- ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[
- 0] for i in range(class_count)}
- classes = [len(ClassIndices[i]) for i in range(class_count)]
-
- if len(set(classes)) == 1:
- print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
- return x_train, y_train
-
- MinCount = min(classes)
-
- BalancedIndices = []
- for i in tqdm(range(class_count),leave=False,desc='Balancing Data',ncols=120):
- if len(ClassIndices[i]) > MinCount:
- SelectedIndices = np.random.choice(
- ClassIndices[i], MinCount, replace=False)
- else:
- SelectedIndices = ClassIndices[i]
- BalancedIndices.extend(SelectedIndices)
-
- BalancedInputs = [x_train[idx] for idx in BalancedIndices]
- BalancedLabels = [y_train[idx] for idx in BalancedIndices]
-
- print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
- ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
- except:
- print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
- return 'e'
-
- return np.array(BalancedInputs), np.array(BalancedLabels)
-
-
- def synthetic_augmentation(x_train, y_train):
- """
- Generates synthetic examples to balance classes with fewer examples.
-
- Arguments:
- x -- Input dataset (examples) - list format
- y -- Class labels (one-hot encoded) - list format
-
- Returns:
- x_balanced -- Balanced input dataset (list format)
- y_balanced -- Balanced class labels (one-hot encoded, list format)
- """
- x = x_train
- y = y_train
- classes = np.arange(y_train.shape[1])
- class_count = len(classes)
-
- # Calculate class distribution
- class_distribution = {i: 0 for i in range(class_count)}
- for label in y:
- class_distribution[np.argmax(label)] += 1
-
- max_class_count = max(class_distribution.values())
-
- x_balanced = list(x)
- y_balanced = list(y)
-
- for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data',ncols= 120):
- class_indices = [i for i, label in enumerate(
- y) if np.argmax(label) == class_label]
- num_samples = len(class_indices)
-
- if num_samples < max_class_count:
- while num_samples < max_class_count:
-
- random_indices = np.random.choice(
- class_indices, 2, replace=False)
- sample1 = x[random_indices[0]]
- sample2 = x[random_indices[1]]
-
- synthetic_sample = sample1 + \
- (np.array(sample2) - np.array(sample1)) * np.random.rand()
-
- x_balanced.append(synthetic_sample.tolist())
- y_balanced.append(y[class_indices[0]])
-
- num_samples += 1
-
- return np.array(x_balanced), np.array(y_balanced)
-
-
- def standard_scaler(x_train, x_test, scaler_params=None):
- info_standard_scaler = """
- Standardizes training and test datasets. x_test may be None.
-
- Args:
- train_data: numpy.ndarray
- Training data
- test_data: numpy.ndarray
- Test data
-
- Returns:
- list:
- Scaler parameters: mean and std
- tuple
- Standardized training and test datasets
- """
- try:
-
- x_train = x_train.tolist()
- x_test = x_test.tolist()
-
- except:
-
- pass
-
- try:
-
- if scaler_params == None and x_test != None:
-
- mean = np.mean(x_train, axis=0)
- std = np.std(x_train, axis=0)
- train_data_scaled = (x_train - mean) / std
- test_data_scaled = (x_test - mean) / std
-
- train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
- test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
-
- scaler_params = [mean, std]
-
- return scaler_params, train_data_scaled, test_data_scaled
-
- if scaler_params == None and x_test == None:
-
- mean = np.mean(x_train, axis=0)
- std = np.std(x_train, axis=0)
- train_data_scaled = (x_train - mean) / std
-
- train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
-
- scaler_params = [mean, std]
-
- return scaler_params, train_data_scaled
-
- if scaler_params != None:
-
- test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
- test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
-
- return test_data_scaled
-
- except:
- print(
- Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
-
-
- def encode_one_hot(y_train, y_test):
- info_one_hot_encode = """
- Performs one-hot encoding on y_train and y_test data..
-
- Args:
- y_train (numpy.ndarray): Training label data.
- y_test (numpy.ndarray): Test label data.
-
- Returns:
- tuple: One-hot encoded y_train and y_test data.
- """
- try:
- classes = np.unique(y_train)
- class_count = len(classes)
-
- class_to_index = {cls: idx for idx, cls in enumerate(classes)}
-
- y_train_encoded = np.zeros((y_train.shape[0], class_count))
- for i, label in enumerate(y_train):
- y_train_encoded[i, class_to_index[label]] = 1
-
- y_test_encoded = np.zeros((y_test.shape[0], class_count))
- for i, label in enumerate(y_test):
- y_test_encoded[i, class_to_index[label]] = 1
- except:
- print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
-
- return y_train_encoded, y_test_encoded
-
-
- def split(X, y, test_size, random_state):
- """
- Splits the given X (features) and y (labels) data into training and testing subsets.
-
- Args:
- X (numpy.ndarray): Features data.
- y (numpy.ndarray): Labels data.
- test_size (float or int): Proportion or number of samples for the test subset.
- random_state (int or None): Seed for random state.
-
- Returns:
- tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
- """
- num_samples = X.shape[0]
-
- if isinstance(test_size, float):
- test_size = int(test_size * num_samples)
- elif isinstance(test_size, int):
- if test_size > num_samples:
- raise ValueError(
- "test_size cannot be larger than the number of samples.")
- else:
- raise ValueError("test_size should be float or int.")
-
- if random_state is not None:
- np.random.seed(random_state)
-
- indices = np.arange(num_samples)
- np.random.shuffle(indices)
-
- test_indices = indices[:test_size]
- train_indices = indices[test_size:]
-
- x_train, x_test = X[train_indices], X[test_indices]
- y_train, y_test = y[train_indices], y[test_indices]
-
- return x_train, x_test, y_train, y_test
-
-
- def metrics(y_ts, test_preds, average='weighted'):
- """
- Calculates precision, recall and F1 score for a classification task.
-
- Args:
- y_ts (list or numpy.ndarray): True labels.
- test_preds (list or numpy.ndarray): Predicted labels.
- average (str): Type of averaging ('micro', 'macro', 'weighted').
-
- Returns:
- tuple: Precision, recall, F1 score.
- """
- y_test_d = decode_one_hot(y_ts)
- y_test_d = np.array(y_test_d)
- y_pred = np.array(test_preds)
-
- if y_test_d.ndim > 1:
- y_test_d = y_test_d.reshape(-1)
- if y_pred.ndim > 1:
- y_pred = y_pred.reshape(-1)
-
- tp = {}
- fp = {}
- fn = {}
-
- classes = np.unique(np.concatenate((y_test_d, y_pred)))
-
- for c in classes:
- tp[c] = 0
- fp[c] = 0
- fn[c] = 0
-
- for c in classes:
- for true, pred in zip(y_test_d, y_pred):
- if true == c and pred == c:
- tp[c] += 1
- elif true != c and pred == c:
- fp[c] += 1
- elif true == c and pred != c:
- fn[c] += 1
-
- precision = {}
- recall = {}
- f1 = {}
-
- for c in classes:
- precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
- recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
- f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
-
- if average == 'micro':
- precision_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fp.values()))) if (np.sum(list(tp.values())) + np.sum(list(fp.values()))) > 0 else 0
- recall_val = np.sum(list(tp.values())) / (np.sum(list(tp.values())) + np.sum(list(fn.values()))) if (np.sum(list(tp.values())) + np.sum(list(fn.values()))) > 0 else 0
- f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
-
- elif average == 'macro':
- precision_val = np.mean(list(precision.values()))
- recall_val = np.mean(list(recall.values()))
- f1_val = np.mean(list(f1.values()))
-
- elif average == 'weighted':
- weights = np.array([np.sum(y_test_d == c) for c in classes])
- weights = weights / np.sum(weights)
- precision_val = np.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
- recall_val = np.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
- f1_val = np.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
-
- else:
- raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
-
- return precision_val, recall_val, f1_val
-
-
- def decode_one_hot(encoded_data):
- """
- Decodes one-hot encoded data to original categorical labels.
-
- Args:
- encoded_data (numpy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
-
- Returns:
- numpy.ndarray: Decoded categorical labels with shape (n_samples,).
- """
-
- decoded_labels = np.argmax(encoded_data, axis=1)
-
- return decoded_labels
-
-
- def roc_curve(y_true, y_score):
- """
- Compute Receiver Operating Characteristic (ROC) curve.
-
- Parameters:
- y_true : array, shape = [n_samples]
- True binary labels in range {0, 1} or {-1, 1}.
- y_score : array, shape = [n_samples]
- Target scores, can either be probability estimates of the positive class,
- confidence values, or non-thresholded measure of decisions (as returned
- by decision_function on some classifiers).
-
- Returns:
- fpr : array, shape = [n]
- Increasing false positive rates such that element i is the false positive rate
- of predictions with score >= thresholds[i].
- tpr : array, shape = [n]
- Increasing true positive rates such that element i is the true positive rate
- of predictions with score >= thresholds[i].
- thresholds : array, shape = [n]
- Decreasing thresholds on the decision function used to compute fpr and tpr.
- """
-
- y_true = np.asarray(y_true)
- y_score = np.asarray(y_score)
-
- if len(np.unique(y_true)) != 2:
- raise ValueError("Only binary classification is supported.")
-
-
- desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
- y_score = y_score[desc_score_indices]
- y_true = y_true[desc_score_indices]
-
-
- fpr = []
- tpr = []
- thresholds = []
- n_pos = np.sum(y_true)
- n_neg = len(y_true) - n_pos
-
- tp = 0
- fp = 0
- prev_score = None
-
-
- for i, score in enumerate(y_score):
- if score != prev_score:
- fpr.append(fp / n_neg)
- tpr.append(tp / n_pos)
- thresholds.append(score)
- prev_score = score
-
- if y_true[i] == 1:
- tp += 1
- else:
- fp += 1
-
- fpr.append(fp / n_neg)
- tpr.append(tp / n_pos)
- thresholds.append(score)
-
- return np.array(fpr), np.array(tpr), np.array(thresholds)
-
-
- def confusion_matrix(y_true, y_pred, class_count):
- """
- Computes confusion matrix.
-
- Args:
- y_true (numpy.ndarray): True class labels (1D array).
- y_pred (numpy.ndarray): Predicted class labels (1D array).
- num_classes (int): Number of classes.
-
- Returns:
- numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
- """
- confusion = np.zeros((class_count, class_count), dtype=int)
-
- for i in range(len(y_true)):
- true_label = y_true[i]
- pred_label = y_pred[i]
- confusion[true_label, pred_label] += 1
-
- return confusion
-
-
- def plot_evaluate(y_test, y_preds, acc_list):
-
- acc = acc_list[len(acc_list) - 1]
- y_true = decode_one_hot(y_test)
-
- y_true = np.array(y_true)
- y_preds = np.array(y_preds)
- Class = np.unique(decode_one_hot(y_test))
-
- precision, recall, f1 = metrics(y_test, y_preds)
-
-
- # Confusion matrix
- cm = confusion_matrix(y_true, y_preds, len(Class))
- fig, axs = plt.subplots(2, 2, figsize=(16, 12))
-
- # Confusion Matrix
- sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
- axs[0, 0].set_title("Confusion Matrix")
- axs[0, 0].set_xlabel("Predicted Class")
- axs[0, 0].set_ylabel("Actual Class")
-
- if len(Class) == 2:
- fpr, tpr, thresholds = roc_curve(y_true, y_preds)
- # ROC Curve
- roc_auc = np.trapz(tpr, fpr)
- axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
- axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
- axs[1, 0].set_xlim([0.0, 1.0])
- axs[1, 0].set_ylim([0.0, 1.05])
- axs[1, 0].set_xlabel('False Positive Rate')
- axs[1, 0].set_ylabel('True Positive Rate')
- axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
- axs[1, 0].legend(loc="lower right")
- axs[1, 0].legend(loc="lower right")
- else:
-
- for i in range(len(Class)):
-
- y_true_copy = np.copy(y_true)
- y_preds_copy = np.copy(y_preds)
-
- y_true_copy[y_true_copy == i] = 0
- y_true_copy[y_true_copy != 0] = 1
-
- y_preds_copy[y_preds_copy == i] = 0
- y_preds_copy[y_preds_copy != 0] = 1
+ Created on Fri Jun 21 05:21:35 2024
 
- fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
-
- roc_auc = np.trapz(tpr, fpr)
- axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
- axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
- axs[1, 0].set_xlim([0.0, 1.0])
- axs[1, 0].set_ylim([0.0, 1.05])
- axs[1, 0].set_xlabel('False Positive Rate')
- axs[1, 0].set_ylabel('True Positive Rate')
- axs[1, 0].set_title('Receiver Operating Characteristic (ROC) Curve')
- axs[1, 0].legend(loc="lower right")
- axs[1, 0].legend(loc="lower right")
-
-
- """
- accuracy_per_class = []
-
- for cls in Class:
- correct = np.sum((y_true == cls) & (y_preds == cls))
- total = np.sum(y_true == cls)
- accuracy_cls = correct / total if total > 0 else 0.0
- accuracy_per_class.append(accuracy_cls)
-
- axs[2, 0].bar(Class, accuracy_per_class, color='b', alpha=0.7)
- axs[2, 0].set_xlabel('Class')
- axs[2, 0].set_ylabel('Accuracy')
- axs[2, 0].set_title('Class-wise Accuracy')
- axs[2, 0].set_xticks(Class)
- axs[2, 0].grid(True)
+ @author: hasan
  """
 
-
-
-
- # Precision, Recall, F1 Score, Accuracy
- metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
- values = [precision, recall, f1, acc]
- colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']
-
- #
- bars = axs[0, 1].bar(metric, values, color=colors)
-
-
- for bar, value in zip(bars, values):
- axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
- ha='center', va='bottom', fontsize=12, color='white', weight='bold')
-
- axs[0, 1].set_ylim(0, 1) # limit the y-axis to the range 0 to 1
- axs[0, 1].set_xlabel('Metrics')
- axs[0, 1].set_ylabel('Score')
- axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
- axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
-
- # Accuracy
- plt.plot(acc_list, marker='o', linestyle='-',
- color='r', label='Accuracy')
-
-
- plt.axhline(y=1, color='g', linestyle='--', label='Maximum Accuracy')
-
-
- plt.xlabel('Samples')
- plt.ylabel('Accuracy')
- plt.title('Accuracy History')
- plt.legend()
-
-
- plt.tight_layout()
- plt.show()
-
- def manuel_balancer(x_train, y_train, target_samples_per_class):
- """
- Generates synthetic examples to balance classes to the specified number of examples per class.
-
- Arguments:
- x_train -- Input dataset (examples) - NumPy array format
- y_train -- Class labels (one-hot encoded) - NumPy array format
- target_samples_per_class -- Desired number of samples per class
-
- Returns:
- x_balanced -- Balanced input dataset (NumPy array format)
- y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
- """
- try:
- x_train = np.array(x_train)
- y_train = np.array(y_train)
- except:
- print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
- pass
- classes = np.arange(y_train.shape[1])
- class_count = len(classes)
-
- x_balanced = []
- y_balanced = []
-
- for class_label in tqdm(range(class_count),leave=False, desc='Augmenting Data',ncols= 120):
- class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
- num_samples = len(class_indices)
-
- if num_samples > target_samples_per_class:
-
- selected_indices = np.random.choice(class_indices, target_samples_per_class, replace=False)
- x_balanced.append(x_train[selected_indices])
- y_balanced.append(y_train[selected_indices])
-
- else:
-
- x_balanced.append(x_train[class_indices])
- y_balanced.append(y_train[class_indices])
-
- if num_samples < target_samples_per_class:
-
- samples_to_add = target_samples_per_class - num_samples
- additional_samples = np.zeros((samples_to_add, x_train.shape[1]))
- additional_labels = np.zeros((samples_to_add, y_train.shape[1]))
-
- for i in range(samples_to_add):
-
- random_indices = np.random.choice(class_indices, 2, replace=False)
- sample1 = x_train[random_indices[0]]
- sample2 = x_train[random_indices[1]]
-
-
- synthetic_sample = sample1 + (sample2 - sample1) * np.random.rand()
-
- additional_samples[i] = synthetic_sample
- additional_labels[i] = y_train[class_indices[0]]
-
-
- x_balanced.append(additional_samples)
- y_balanced.append(additional_labels)
-
- x_balanced = np.vstack(x_balanced)
- y_balanced = np.vstack(y_balanced)
-
- return x_balanced, y_balanced
-
- def get_weights():
-
- return 0
-
-
- def get_df():
-
- return 2
-
-
- def get_preds():
-
- return 1
-
-
- def get_acc():
-
- return 2
  # -*- coding: utf-8 -*-
  """
  Created on Tue Jun 18 23:32:16 2024
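The hunk above deletes what was an accidental second copy of the entire module: every function removed here (fit, evaluate, save_model, and the rest) survives unchanged in the copy that begins at the second "# -*- coding: utf-8 -*-" header. One removed helper worth noting is normalization, which is plain max-absolute scaling; a minimal standalone sketch of the same idea (the zero-vector guard is an addition in this sketch, the library divides unconditionally):

    import numpy as np

    def max_abs_scale(x):
        # Equivalent of plan.normalization: scale by the largest absolute value.
        x = np.asarray(x, dtype=float)
        max_abs = np.max(np.abs(x))
        if max_abs == 0:
            return x  # guard added for this sketch; not present in the library
        return x / max_abs

    print(max_abs_scale([2.0, -4.0, 1.0]))  # -> [ 0.5  -1.    0.25]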
@@ -1656,8 +47,8 @@ def fit(
  val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
  val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
  activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
- x_val (list[num]): List of validation data. (optional) Default: 10% of x_train (auto_balanced)
- y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 10% of y_train (auto_balanced)
+ x_val (list[num]): List of validation data. (optional) Default: 1% of x_train (auto_balanced) it means every %1 of train progress starts validation
+ y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 1% of y_train (auto_balanced) it means every %1 of train progress starts validation
  show_training (bool, str): True, None or'final'
  show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
  Returns:
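The docstring lines above track a behavior change in fit: with val=True and no validation set supplied, fit still splits off 10% of x_train as validation data, but the periodic accuracy check now runs after every 1% of training progress instead of every 10% (the val_count default changes in the next hunk). A hypothetical call, assuming the package is imported as plan and y_train is already one-hot encoded:

    import plan

    # x_train / y_train are placeholders for your own data here.
    W = plan.fit(x_train, y_train, val=True)                   # validate every 1% of progress
    W = plan.fit(x_train, y_train, val=True, val_count=0.05)   # or every 5%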
@@ -1676,21 +67,19 @@ def fit(
 
  if x_val == None and y_val == None:
 
- x_train, x_val, y_train, y_val = split(x_train, y_train,test_size=0.1,random_state=42)
+ x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
 
  x_train, y_train = auto_balancer(x_train, y_train)
  x_val, y_val = auto_balancer(x_val, y_val)
 
- y_train, y_val = encode_one_hot(y_train, y_val)
-
  except:
- pass
+ pass
 
  if val == True:
 
  if val_count == None:
 
- val_count = 0.1
+ val_count = 0.01
 
  v_iter = 0
 
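Two changes land in this hunk. First, the auto-split path no longer re-encodes labels (the removed encode_one_hot call), consistent with fit's requirement that y_train already arrive one-hot encoded. Second, the default validation interval drops from 0.1 to 0.01; the fraction is converted to a sample count with val_count = int(len(x_train) * val_count), so for an illustrative 5000-sample training set:

    len_x = 5000
    old = int(len_x * 0.1)    # 500 samples between validation passes (2.7.1)
    new = int(len_x * 0.01)   # 50 samples between validation passes (2.7.3)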
@@ -1864,7 +253,7 @@ def fit(
 
  if val == 'final':
 
- validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
+ validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, bar_status=None, show_metrices=None)
 
  val_acc = validation_model[get_acc()]
 
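The old call passed a stray None in the third position, so every later argument landed one slot to the right of where evaluate expects it; the new call lines up with the signature (renamed in the next hunk). Mapping the two calls onto the parameters:

    # evaluate(x_test, y_test, W, activation_potentiation=None, bar_status=True, show_metrices=None)

    # 2.7.1 (broken): W=None, activation_potentiation=LTPW, bar_status=activation_potentiation
    evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)

    # 2.7.3 (fixed): the weights actually reach W; options passed by keyword
    evaluate(x_val, y_val, LTPW, activation_potentiation, bar_status=None, show_metrices=None)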
@@ -2087,7 +476,7 @@ def evaluate(
  y_test, # list[num]: Test labels.
  W, # list[num]: Weight matrix list of the neural network.
  activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
- acc_bar_status=True, # acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+ bar_status=True, # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
  ) -> tuple:
  infoTestModel = """
@@ -2098,7 +487,7 @@ def evaluate(
  y_test (list[num]): Test labels.
  W (list[num]): Weight matrix list of the neural network.
  activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
- acc_bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+ bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices (bool): (True or None) (optional) Default: None
 
  Returns:
@@ -2117,7 +506,7 @@ def evaluate(
  Wc[i] = np.copy(w)
 
 
- if acc_bar_status == True:
+ if bar_status == True:
 
  test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
  acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
@@ -2148,7 +537,7 @@ def evaluate(
  acc_list.append(acc)
  y_preds[inpIndex] = PredictedOutput
 
- if acc_bar_status == True:
+ if bar_status == True:
  test_progress.update(1)
  if inpIndex == 0:
  acc_bar.update(acc)
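The rename from acc_bar_status to bar_status touches evaluate's signature, its docstring, and both uses inside the function body. Positional callers are unaffected because the parameter keeps its position, but keyword callers need the new name; a hypothetical migration, assuming the package is imported as plan:

    # 2.7.1
    W, y_preds, acc = plan.evaluate(x_test, y_test, W, acc_bar_status=None)
    # 2.7.3
    W, y_preds, acc = plan.evaluate(x_test, y_test, W, bar_status=None)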
@@ -2793,7 +1182,6 @@ def encode_one_hot(y_train, y_test):
  Returns:
  tuple: One-hot encoded y_train and y_test data.
  """
-
  classes = np.unique(y_train)
  class_count = len(classes)
 
@@ -2807,7 +1195,6 @@ def encode_one_hot(y_train, y_test):
  for i, label in enumerate(y_test):
  y_test_encoded[i, class_to_index[label]] = 1
 
-
  return y_train_encoded, y_test_encoded
 
 
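The final two hunks only delete stray blank lines inside encode_one_hot; behavior is unchanged. For reference, a small usage sketch of that helper with illustrative integer labels (classes are taken from y_train, one column per class):

    import numpy as np
    import plan

    y_train = np.array([0, 1, 2, 1])
    y_test = np.array([2, 0])
    y_train_oh, y_test_oh = plan.encode_one_hot(y_train, y_test)
    # y_train_oh -> [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 1., 0.]]
    # y_test_oh  -> [[0., 0., 1.], [1., 0., 0.]]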