pyerualjetwork 2.5.8__py3-none-any.whl → 2.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -1,4 +1,3 @@
-
  # -*- coding: utf-8 -*-
  """
  Created on Tue Jun 18 23:32:16 2024
@@ -6,34 +5,15 @@ Created on Tue Jun 18 23:32:16 2024
  @author: hasan
  """

- # optic
-
- # -*- coding: utf-8 -*-
- """
- Created on Fri Jun 21 05:21:35 2024
-
- @author: hasan
- """
-
- # -*- coding: utf-8 -*-
-
-
-
- """
- Created on Thu Jun 12 00:00:00 2024
-
- @author: hasan can beydili
- """
-
  import pandas as pd
  import numpy as np
  import time
  from colorama import Fore, Style
  from typing import List, Union
- import math
  from scipy.special import expit, softmax
  import matplotlib.pyplot as plt
  import seaborn as sns
+ from tqdm import tqdm

  # BUILD -----

@@ -41,13 +21,13 @@ import seaborn as sns
  def fit(
  x_train: List[Union[int, float]],
  y_train: List[Union[int, float]], # At least two.. and one hot encoded
- show_training,
- show_count= None,
- val_count= None,
  val= None,
+ val_count = None,
+ activation_potentiation=None, # (float): Input activation_potentiation (optional)
  x_val= None,
  y_val= None,
- activation_potential=None # (float): Input activation potential (optional)
+ show_training = None,
+ show_count= None
  ) -> str:

  infoPLAN = """
@@ -56,40 +36,55 @@ def fit(
  Args:
  x_train (list[num]): List of input data.
  y_train (list[num]): List of target labels. (one hot encoded)
+ val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
+ val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
+ activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
+ x_val (list[num]): List of validation data. (optional) Default: 1% of x_train (auto_balanced) it means every %1 of train progress starts validation
+ y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 1% of y_train (auto_balanced) it means every %1 of train progress starts validation
  show_training (bool, str): True, None or'final'
  show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
- val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
- x_val (list[num]): List of validation data. (optional) Default: x_train
- y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
- activation_potential (float): Input activation potential (for binary injection) (optional) in range: -1, 1
  Returns:
  list([num]): (Weight matrices list, train_predictions list, Train_acc).
  error handled ?: Process status ('e')
  """
-

  if len(x_train) != len(y_train):

- print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
+ print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
  return 'e'
-
- if x_val == None and y_val == None:

- x_val = x_train
- y_val = y_train
+ if val == True or val == 'final':
+
+ try:

- if val == True and val_count == None:
+ if x_val == None and y_val == None:

- val_count = 0.1
- val_count_copy = val_count
+ x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
+
+ x_train, y_train = auto_balancer(x_train, y_train)
+ x_val, y_val = auto_balancer(x_val, y_val)

- if val == True:
+ except:
+ pass
+
+ if val == True:
+
+ if val_count == None:
+
+ val_count = 0.01
+
+ v_iter = 0
+
+ if val == 'final':
+
+ val_count = 0.99

  val_count = int(len(x_train) * val_count)
-
  val_count_copy = val_count
+ val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
+ val_list = [] * val_count

- if show_training == True or show_training == 'final' and show_count == None:
+ if show_count == None:

  show_count = 10

@@ -109,25 +104,24 @@ def fit(

  neurons = [len(class_count), len(class_count)]
  layers = ['fex']
- val_list = [None]

  x_train[0] = np.array(x_train[0])
  x_train[0] = x_train[0].ravel()
  x_train_size = len(x_train[0])

- W = weight_identification(
- len(layers) - 1, len(class_count), neurons, x_train_size)
+ STPW = weight_identification(
+ len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
+
+ LTPW = [0] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT

- trained_W = [1] * len(W)
- print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
- start_time = time.time()
  y = decode_one_hot(y_train)

+ train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
+
  for index, inp in enumerate(x_train):

  progress = index / len(x_train) * 100

- uni_start_time = time.time()
  inp = np.array(inp)
  inp = inp.ravel()

@@ -140,44 +134,35 @@ def fit(

  for Lindex, Layer in enumerate(layers):

- neural_layer = normalization(neural_layer)
-
  if Layer == 'fex':
- W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
+ STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)

- for i, w in enumerate(W):
- trained_W[i] = trained_W[i] + w
+ for i, w in enumerate(STPW):
+ LTPW[i] = LTPW[i] + w


- if val == True:
-
- if index == val_count:
-
- val_count += val_count_copy
-
- layers.append('cat')
- trained_W.append(np.eye(len(class_count)))
-
- validation_model = evaluate(x_val, y_val, None, trained_W)
-
- layers.pop()
- trained_W.pop()
+ if val == True and index == val_count:
+

- val_acc = validation_model[get_acc()]
+ val_count += val_count_copy

- val_list.append(val_acc)
+ validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, None)
+
+ val_acc = validation_model[get_acc()]

+ val_list.append(val_acc)
+
+ if v_iter == 0:

- plt.plot(val_list, linestyle='-',
- color='r')
+ val_bar.update(val_acc)

- progress_status = f"{progress:.1f}"
- plt.title('Validation accuracy graph. Amount of data learned by the model: % ' + progress_status)
- plt.xlabel('Learning Progress')
- plt.ylabel('Accuracy')
- plt.ylim(0, 1)
- plt.draw()
- plt.pause(0.1)
+
+ if v_iter != 0:
+
+ val_acc = val_acc - val_list[v_iter - 1]
+ val_bar.update(val_acc)
+
+ v_iter += 1

  if show_training == True:

@@ -200,11 +185,11 @@ def fit(

  if row != 0:

- mat = trained_W[0][j,:].reshape(row, col)
+ mat = LTPW[0][j,:].reshape(row, col)
  suptitle_info = 'Neurons Learning Progress: % '
  title_info = f'{j+1}. Neuron'

- mat = trained_W[0][j,:].reshape(row, col)
+ mat = LTPW[0][j,:].reshape(row, col)

  ax[j].imshow(mat, interpolation='sinc', cmap='viridis')

@@ -216,38 +201,24 @@ def fit(

  else:

- mat = trained_W[0]
+ mat = LTPW[0]
  ax.imshow(mat, interpolation='sinc', cmap='viridis')
  suptitle_info = 'Weight Learning Progress: % '
  title_info = 'Weight Matrix Of Fex Layer'

-
-
+
+

- progress_status = f"{progress:.1f}"
- fig.suptitle(suptitle_info + progress_status)
- plt.draw()
- plt.pause(0.1)
-
-
- W = weight_identification(
+ progress_status = f"{progress:.1f}"
+ fig.suptitle(suptitle_info + progress_status)
+ plt.draw()
+ plt.pause(0.1)
+
+
+ STPW = weight_identification(
  len(layers) - 1, len(class_count), neurons, x_train_size)

- uni_end_time = time.time()
-
- calculating_est = round(
- (uni_end_time - uni_start_time) * (len(x_train) - index), 3)
-
- if calculating_est < 60:
- print('\rest......(sec):', calculating_est, '\n', end="")
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):', calculating_est/60, '\n', end="")
-
- elif calculating_est > 3600:
- print('\rest......(h):', calculating_est/3600, '\n', end="")
-
- print('\rTraining: ', index, "/", len(x_train), "\n", end="")
+ train_progress.update(1)

  if show_training == 'final':

@@ -255,7 +226,7 @@ def fit(

  for j in range(len(class_count)):

- mat = trained_W[0][j,:].reshape(row, col)
+ mat = LTPW[0][j,:].reshape(row, col)

  ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
  ax[j].set_aspect('equal')
@@ -270,26 +241,19 @@ def fit(
  plt.pause(0.1)


- EndTime = time.time()
-
- calculating_est = round(EndTime - start_time, 2)
-
- print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n" + Style.RESET_ALL)
-
- if calculating_est < 60:
- print('Total training time(sec): ', calculating_est)
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('Total training time(min): ', calculating_est/60)
-
- elif calculating_est > 3600:
- print('Total training time(h): ', calculating_est/3600)
+ if val == 'final':
+
+ validation_model = evaluate(x_val, y_val, LTPW, activation_potentiation, bar_status=None, show_metrices=None)
+
+ val_acc = validation_model[get_acc()]

- layers.append('cat')
- trained_W.append(np.eye(len(class_count)))
-
-
- return trained_W
+ val_list.append(val_acc)
+
+ val_bar.update(val_acc)
+
+ LTPW = normalization(LTPW)
+
+ return LTPW

  # FUNCTIONS -----

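Taken together, these fit hunks swap the manual timing printouts for tqdm progress bars, rename activation_potential to activation_potentiation, move show_training and show_count to the end of the signature, and return the normalized LTPW weights directly (no 'cat' layer is appended anymore). A minimal calling sketch against the new signature; the data variables are illustrative and `from plan import plan` is an assumed import path for this package:

# Illustrative sketch, not from the package docs. Assumes x_train is a list/array
# of flattenable samples and y_train is already one hot encoded.
import numpy as np
from plan import plan  # assumed import path

# val=True holds out 10% of x_train internally (split + auto_balancer, per the
# hunks above) and reports validation accuracy on a tqdm bar during training.
W = plan.fit(x_train, y_train, val=True)

# Optional binary injection: inputs are thresholded around activation_potentiation.
W_binary = plan.fit(x_train, y_train, activation_potentiation=0.5)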
@@ -381,48 +345,48 @@ def fex(
  w, # num: Weight matrix of the neural network.
  is_training, # bool: Flag indicating if the function is called during training (True or False).
  Class, # int: Which class is, if training.
- activation_potential # float or None: Input activation potential (optional)
+ activation_potentiation # float or None: Input activation potentiation (optional)
  ) -> tuple:
  """
- Applies feature extraction process to the input data using synaptic pruning.
+ Applies feature extraction process to the input data using synaptic potentiation.

  Args:
  Input (num): Input data.
  w (num): Weight matrix of the neural network.
  is_training (bool): Flag indicating if the function is called during training (True or False).
  Class (int): if is during training then which class(label) ? is isnt then put None.
- activation_potential (float or None): Threshold value for comparison. (optional)
+ activation_potentiation (float or None): Threshold value for comparison. (optional)

  Returns:
  tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
  """

- if is_training == True and activation_potential == None:
+ if is_training == True and activation_potentiation == None:

  w[Class, :] = Input

  return w

- elif is_training == True and activation_potential != None:
+ elif is_training == True and activation_potentiation != None:


- Input[Input < activation_potential] = 0
- Input[Input > activation_potential] = 1
+ Input[Input < activation_potentiation] = 0
+ Input[Input > activation_potentiation] = 1

  w[Class,:] = Input

  return w

- elif is_training == False and activation_potential == None:
+ elif is_training == False and activation_potentiation == None:

  neural_layer = np.dot(w, Input)

  return neural_layer

- elif is_training == False and activation_potential != None:
+ elif is_training == False and activation_potentiation != None:

- Input[Input < activation_potential] = 0
- Input[Input > activation_potential] = 1
+ Input[Input < activation_potentiation] = 0
+ Input[Input > activation_potentiation] = 1

  neural_layer = np.dot(w, Input)

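The four fex branches reduce to two behaviors: during training the (optionally binarized) input is written into the weight row of its class, and at inference the layer output is the dot product of w and Input. A self-contained sketch of the thresholding applied by the '+' lines above; note that values exactly equal to the threshold pass through unchanged:

import numpy as np

def binarize(inp, activation_potentiation):
    # Mirrors the '+' lines above: strictly below the threshold -> 0, strictly above -> 1.
    out = np.array(inp, dtype=float)
    out[out < activation_potentiation] = 0
    out[out > activation_potentiation] = 1
    return out

print(binarize([0.2, 0.5, 0.9], 0.5))  # [0.  0.5 1. ], the 0.5 entry is untouched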
@@ -502,9 +466,10 @@ def Relu(
  def evaluate(
  x_test, # list[num]: Test input data.
  y_test, # list[num]: Test labels.
- show_metrices, # show_metrices (bool): (True or False)
  W, # list[num]: Weight matrix list of the neural network.
- activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
+ activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ bar_status=True, # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+ show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
  ) -> tuple:
  infoTestModel = """
  Tests the neural network model with the given test data.
@@ -512,17 +477,17 @@ def evaluate(
  Args:
  x_test (list[num]): Test input data.
  y_test (list[num]): Test labels.
- show_metrices (bool): (True or False)
  W (list[num]): Weight matrix list of the neural network.
- activation_potential (float or None): Threshold value for comparison. (optional)
+ activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
+ show_metrices (bool): (True or None) (optional) Default: None

  Returns:
  tuple: A tuple containing the predicted labels and the accuracy of the model.
  """
-
- layers = ['fex', 'cat']
-
  try:
+ layers = ['fex']
+
  Wc = [0] * len(W) # Wc = Weight copy
  true = 0
  y_preds = [-1] * len(y_test)
@@ -530,25 +495,25 @@ def evaluate(

  for i, w in enumerate(W):
  Wc[i] = np.copy(w)
- print('\rCopying weights.....', i+1, '/', len(W), end="")
-
- print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
- start_time = time.time()
+
+
+ if bar_status == True:
+
+ test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
+ acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
+
+
  for inpIndex, Input in enumerate(x_test):
  Input = np.array(Input)
  Input = Input.ravel()
- uni_start_time = time.time()
  neural_layer = Input
-
+
  for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
+
  if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
- elif Layer == 'cat':
- neural_layer = np.dot(W[index], neural_layer)
-
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
+
+
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
  RealOutput = np.argmax(y_test[inpIndex])
@@ -556,62 +521,30 @@ def evaluate(
  if RealOutput == PredictedOutput:
  true += 1
  acc = true / len(y_test)
- if show_metrices == True:
- acc_list.append(acc)
+
+
+ acc_list.append(acc)
  y_preds[inpIndex] = PredictedOutput
-
- uni_end_time = time.time()
-
- calculating_est = round(
- (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
-
- if calculating_est < 60:
- print('\rest......(sec):', calculating_est, '\n', end="")
- print('\rTest accuracy: ', acc, "\n", end="")
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):', calculating_est/60, '\n', end="")
- print('\rTest accuracy: ', acc, "\n", end="")
-
- elif calculating_est > 3600:
- print('\rest......(h):', calculating_est/3600, '\n', end="")
- print('\rTest accuracy: ', acc, "\n", end="")
- if show_metrices == True:
+
+ if bar_status == True:
+ test_progress.update(1)
+ if inpIndex == 0:
+ acc_bar.update(acc)
+
+ else:
+ acc = acc - acc_list[inpIndex - 1]
+ acc_bar.update(acc)
+
+ if show_metrices == True:
  plot_evaluate(y_test, y_preds, acc_list)

- EndTime = time.time()
+
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)

- calculating_est = round(EndTime - start_time, 2)
-
- print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
-
- if calculating_est < 60:
- print('Total testing time(sec): ', calculating_est)
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('Total testing time(min): ', calculating_est/60)
-
- elif calculating_est > 3600:
- print('Total testing time(h): ', calculating_est/3600)
-
- if acc >= 0.8:
- print(Fore.GREEN + '\nTotal Test accuracy: ',
- acc, '\n' + Style.RESET_ALL)
-
- elif acc < 0.8 and acc > 0.6:
- print(Fore.MAGENTA + '\nTotal Test accuracy: ',
- acc, '\n' + Style.RESET_ALL)
-
- elif acc <= 0.6:
- print(Fore.RED + '\nTotal Test accuracy: ', acc, '\n' + Style.RESET_ALL)
-
  except:
-
- print(Fore.RED + "ERROR: Are you sure weights are loaded ? from: evaluate" +
- infoTestModel + Style.RESET_ALL)
- return 'e'
+
+ print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)

  return W, y_preds, acc

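evaluate now takes W as its third positional argument, turns activation_potentiation, bar_status and show_metrices into defaulted keywords, and drops the 'cat' layer from the forward pass. A hedged call sketch; get_acc() and get_preds() are the package's index helpers into the returned (W, y_preds, acc) tuple:

# Illustrative call against the 2.7.8 evaluate signature (assumed import path).
from plan import plan

test_model = plan.evaluate(x_test, y_test, W,
                           activation_potentiation=None,  # must match the trained model
                           bar_status=True,               # tqdm testing/accuracy bars
                           show_metrices=None)            # True draws plot_evaluate charts

test_acc = test_model[plan.get_acc()]      # get_acc() returns 2, the tuple index of acc
test_preds = test_model[plan.get_preds()]  # index helper for y_preds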
@@ -621,7 +554,7 @@ def multiple_evaluate(
  y_test, # list[num]: Test labels.
  show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
  MW, # list[list[num]]: Weight matrix of the neural network.
- activation_potential=None # (float or None): Threshold value for comparison. (optional)
+ activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
  ) -> tuple:
  infoTestModel = """
  Tests the neural network model with the given test data.
@@ -668,9 +601,7 @@ def multiple_evaluate(
  neural_layer = normalization(neural_layer)

  if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
- elif Layer == 'cat':
- neural_layer = np.dot(W[index], neural_layer)
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)

  output_layer += neural_layer

@@ -738,7 +669,7 @@ def multiple_evaluate(

  except:

- print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+ print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
  return 'e'

  return W, y_preds, acc
@@ -753,11 +684,11 @@ def save_model(model_name,
  model_path,
  scaler_params,
  W,
- activation_potential=None
+ activation_potentiation=None
  ):

  infosave_model = """
- Function to save a pruning learning model.
+ Function to save a potentiation learning model.

  Arguments:
  model_name (str): Name of the model.
@@ -769,7 +700,7 @@ def save_model(model_name,
  model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
  scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
  W: Weights of the model.
- activation_potential (float or None): Threshold value for comparison. (optional)
+ activation_potentiation (float or None): Threshold value for comparison. (optional)

  Returns:
  str: Message indicating if the model was saved successfully or encountered an error.
@@ -778,7 +709,7 @@ def save_model(model_name,
  # Operations to be performed by the function will be written here
  pass

- layers = ['fex', 'cat']
+ layers = ['fex']

  if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
  print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
@@ -819,7 +750,7 @@ def save_model(model_name,
  'WEIGHTS FORMAT': weights_format,
  'MODEL PATH': model_path,
  'STANDARD SCALER': scaler_params,
- 'ACTIVATION POTENTIAL': activation_potential
+ 'ACTIVATION POTENTIATION': activation_potentiation
  }
  try:

@@ -908,14 +839,14 @@ def load_model(model_name,
  model_path,
  ):
  infoload_model = """
- Function to load a pruning learning model.
+ Function to load a potentiation learning model.

  Arguments:
  model_name (str): Name of the model.
  model_path (str): Path where the model is saved.

  Returns:
- lists: W(list[num]), activation_potential, DataFrame of the model
+ lists: W(list[num]), activation_potentiation, DataFrame of the model
  """
  pass

@@ -955,7 +886,7 @@ def load_model(model_name,
  def predict_model_ssd(Input, model_name, model_path):

  infopredict_model_ssd = """
- Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
+ Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).

  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
@@ -965,15 +896,15 @@ def predict_model_ssd(Input, model_name, model_path):
  """
  W, df = load_model(model_name, model_path)

- activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
+ activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])

- if activation_potential != 'nan':
+ if activation_potentiation != 'nan':

- activation_potential = float(activation_potential)
+ activation_potentiation = float(activation_potentiation)

  else:

- activation_potential = None
+ activation_potentiation = None

  try:

@@ -986,10 +917,10 @@ def predict_model_ssd(Input, model_name, model_path):

  except:

- non_scaled = True
+ pass


- layers = ['fex', 'cat']
+ layers = ['fex']

  Wc = [0] * len(W)
  for i, w in enumerate(W):
@@ -999,11 +930,9 @@ def predict_model_ssd(Input, model_name, model_path):
  neural_layer = np.array(neural_layer)
  neural_layer = neural_layer.ravel()
  for index, Layer in enumerate(layers):
-
- neural_layer = normalization(neural_layer)
-
+
  if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
  elif Layer == 'cat':
  neural_layer = np.dot(W[index], neural_layer)
  except:
@@ -1015,27 +944,29 @@ def predict_model_ssd(Input, model_name, model_path):
  return neural_layer


- def predict_model_ram(Input, scaler_params, W, activation_potential=None):
+ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):

  infopredict_model_ram = """
- Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
+ Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
  from weights and parameters stored in memory.

  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
- scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
  W (list of ndarrays): Weights of the model.
- activation_potential (float or None): Threshold value for comparison. (optional)
+ scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
+ activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None

  Returns:
  ndarray: Output from the model.
  """
-
- if scaler_params != None:
+ try:
+ if scaler_params != None:

- Input = standard_scaler(None, Input, scaler_params)
+ Input = standard_scaler(None, Input, scaler_params)
+ except:
+ Input = standard_scaler(None, Input, scaler_params)

- layers = ['fex', 'cat']
+ layers = ['fex']

  Wc = [0] * len(W)
  for i, w in enumerate(W):
@@ -1046,10 +977,8 @@ def predict_model_ram(Input, scaler_params, W, activation_potential=None):

  neural_layer = neural_layer.ravel()
  for index, Layer in enumerate(layers):

- neural_layer = normalization(neural_layer)
-
  if Layer == 'fex':
- neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+ neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
  elif Layer == 'cat':
  neural_layer = np.dot(W[index], neural_layer)

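Note the argument reorder: predict_model_ram(Input, scaler_params, W, ...) in 2.5.8 becomes predict_model_ram(Input, W, scaler_params=None, ...) in 2.7.8, so positional callers written against the old signature must be updated. A sketch, with scaler_params being the mean/std list that save_model stores:

# Illustrative, assuming the import path shown earlier.
import numpy as np
from plan import plan

output = plan.predict_model_ram(sample, W)                               # unscaled input
output = plan.predict_model_ram(sample, W, scaler_params=scaler_params)  # scaled input
predicted_class = np.argmax(output)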
@@ -1089,7 +1018,7 @@ def auto_balancer(x_train, y_train):
  MinCount = min(classes)

  BalancedIndices = []
- for i in range(class_count):
+ for i in tqdm(range(class_count),leave=False,desc='Balancing Data',ncols=120):
  if len(ClassIndices[i]) > MinCount:
  SelectedIndices = np.random.choice(
  ClassIndices[i], MinCount, replace=False)
@@ -1106,7 +1035,7 @@ def auto_balancer(x_train, y_train):
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
  return 'e'

- return BalancedInputs, BalancedLabels
+ return np.array(BalancedInputs), np.array(BalancedLabels)


  def synthetic_augmentation(x_train, y_train):
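As of 2.7.8 auto_balancer (which fit now calls on its internal train/validation split) returns numpy arrays instead of Python lists, so downstream code that relied on list semantics should expect ndarrays:

# auto_balancer downsamples every class to the size of the smallest one and,
# per the '+' line above, now returns ndarrays (illustrative sketch).
import numpy as np
from plan import plan

x_bal, y_bal = plan.auto_balancer(x_train, y_train)
assert isinstance(x_bal, np.ndarray) and isinstance(y_bal, np.ndarray)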
@@ -1136,7 +1065,7 @@ def synthetic_augmentation(x_train, y_train):
  x_balanced = list(x)
  y_balanced = list(y)

- for class_label in range(class_count):
+ for class_label in tqdm(range(class_count), leave=False, desc='Augmenting Data',ncols= 120):
  class_indices = [i for i, label in enumerate(
  y) if np.argmax(label) == class_label]
  num_samples = len(class_indices)
@@ -1160,7 +1089,7 @@ def synthetic_augmentation(x_train, y_train):
  return np.array(x_balanced), np.array(y_balanced)


- def standard_scaler(x_train, x_test, scaler_params=None):
+ def standard_scaler(x_train, x_test=None, scaler_params=None):
  info_standard_scaler = """
  Standardizes training and test datasets. x_test may be None.

@@ -1168,7 +1097,7 @@ def standard_scaler(x_train, x_test, scaler_params=None):
  train_data: numpy.ndarray
  Training data
  test_data: numpy.ndarray
- Test data
+ Test data (optional)

  Returns:
  list:
@@ -1176,6 +1105,15 @@ def standard_scaler(x_train, x_test, scaler_params=None):
  tuple
  Standardized training and test datasets
  """
+ try:
+
+ x_train = x_train.tolist()
+ x_test = x_test.tolist()
+
+ except:
+
+ pass
+
  try:

  if scaler_params == None and x_test != None:
@@ -1213,7 +1151,7 @@ def standard_scaler(x_train, x_test, scaler_params=None):

  except:
  print(
- Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
+ Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)


  def encode_one_hot(y_train, y_test):
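standard_scaler now tolerates x_test=None and silently converts ndarray inputs via .tolist(). At inference time the package calls it internally as standard_scaler(None, Input, scaler_params), which is also how saved mean/std params can be reused by hand; a sketch, where scaler_params is the list that save_model stores:

# Illustrative, assuming the import path shown earlier.
from plan import plan

x_train_s, x_test_s = plan.standard_scaler(x_train, x_test)                  # fit-time pair
x_scaled = plan.standard_scaler(None, sample, scaler_params=scaler_params)   # reuse saved params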
@@ -1227,21 +1165,18 @@ def encode_one_hot(y_train, y_test):
  Returns:
  tuple: One-hot encoded y_train ve y_test verileri.
  """
- try:
- classes = np.unique(y_train)
- class_count = len(classes)
+ classes = np.unique(y_train)
+ class_count = len(classes)

- class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+ class_to_index = {cls: idx for idx, cls in enumerate(classes)}

- y_train_encoded = np.zeros((y_train.shape[0], class_count))
- for i, label in enumerate(y_train):
- y_train_encoded[i, class_to_index[label]] = 1
+ y_train_encoded = np.zeros((y_train.shape[0], class_count))
+ for i, label in enumerate(y_train):
+ y_train_encoded[i, class_to_index[label]] = 1

- y_test_encoded = np.zeros((y_test.shape[0], class_count))
- for i, label in enumerate(y_test):
- y_test_encoded[i, class_to_index[label]] = 1
- except:
- print(Fore.RED + 'ERROR: y_train and y_test must be numpy array. from: one_hot_encode' + info_one_hot_encode)
+ y_test_encoded = np.zeros((y_test.shape[0], class_count))
+ for i, label in enumerate(y_test):
+ y_test_encoded[i, class_to_index[label]] = 1

  return y_train_encoded, y_test_encoded

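encode_one_hot lost its defensive try/except in 2.7.8, so it now assumes numpy arrays of scalar labels and will raise on plain lists (which have no .shape). A sketch:

# Labels must be numpy arrays now that the guard above was removed (illustrative).
import numpy as np
from plan import plan

y_train_oh, y_test_oh = plan.encode_one_hot(np.array(train_labels), np.array(test_labels))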
@@ -1259,7 +1194,6 @@ def split(X, y, test_size, random_state):
  Returns:
  tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
  """
- # Size of the dataset
  num_samples = X.shape[0]

  if isinstance(test_size, float):
@@ -1476,7 +1410,6 @@ def plot_evaluate(y_test, y_preds, acc_list):

  # Confusion matrix
  cm = confusion_matrix(y_true, y_preds, len(Class))
- # Subplot içinde düzenleme
  fig, axs = plt.subplots(2, 2, figsize=(16, 12))

  # Confusion Matrix
@@ -1595,16 +1528,19 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
  x_balanced -- Balanced input dataset (NumPy array format)
  y_balanced -- Balanced class labels (one-hot encoded, NumPy array format)
  """
- start_time = time.time()
- x_train = np.array(x_train)
- y_train = np.array(y_train)
+ try:
+ x_train = np.array(x_train)
+ y_train = np.array(y_train)
+ except:
+ print(Fore.GREEN + "x_tarin and y_train already numpyarray." + Style.RESET_ALL)
+ pass
  classes = np.arange(y_train.shape[1])
  class_count = len(classes)

  x_balanced = []
  y_balanced = []

- for class_label in range(class_count):
+ for class_label in tqdm(range(class_count),leave=False, desc='Augmenting Data',ncols= 120):
  class_indices = np.where(np.argmax(y_train, axis=1) == class_label)[0]
  num_samples = len(class_indices)

@@ -1626,8 +1562,7 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
  additional_labels = np.zeros((samples_to_add, y_train.shape[1]))

  for i in range(samples_to_add):
- uni_start_time = time.time()
-
+
  random_indices = np.random.choice(class_indices, 2, replace=False)
  sample1 = x_train[random_indices[0]]
  sample2 = x_train[random_indices[1]]
@@ -1638,41 +1573,10 @@ def manuel_balancer(x_train, y_train, target_samples_per_class):
  additional_samples[i] = synthetic_sample
  additional_labels[i] = y_train[class_indices[0]]

- uni_end_time = time.time()
- calculating_est = round(
- (uni_end_time - uni_start_time) * (samples_to_add - i), 3)
-
- if calculating_est < 60:
- print('\rest......(sec):', calculating_est, '\n', end="")
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('\rest......(min):', calculating_est/60, '\n', end="")
-
- elif calculating_est > 3600:
- print('\rest......(h):', calculating_est/3600, '\n', end="")
-
- print('Augmenting: ', class_label, '/', class_count, '...', i, "/", samples_to_add, "\n", end="")
-
+
  x_balanced.append(additional_samples)
  y_balanced.append(additional_labels)
-
-
- EndTime = time.time()
-
- calculating_est = round(EndTime - start_time, 2)
-
- print(Fore.GREEN + " \nBalancing Finished with 0 ERROR\n" + Style.RESET_ALL)
-
- if calculating_est < 60:
- print('Total balancing time(sec): ', calculating_est)
-
- elif calculating_est > 60 and calculating_est < 3600:
- print('Total balancing time(min): ', calculating_est/60)
-
- elif calculating_est > 3600:
- print('Total balancing time(h): ', calculating_est/3600)

- # Stack the balanced arrays
  x_balanced = np.vstack(x_balanced)
  y_balanced = np.vstack(y_balanced)

@@ -1695,4 +1599,4 @@ def get_preds():

  def get_acc():

- return 2
+ return 2
pyerualjetwork-2.7.8.dist-info/METADATA ADDED
@@ -0,0 +1,8 @@
+ Metadata-Version: 2.1
+ Name: pyerualjetwork
+ Version: 2.7.8
+ Summary: Code improvements
+ Author: Hasan Can Beydili
+ Author-email: tchasancan@gmail.com
+ Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
+
pyerualjetwork-2.7.8.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
+ plan/plan.py,sha256=h1u6hk_wTjbCGkKhnAz6jidxTogmMTM2AEm7nxE9WQQ,52805
+ pyerualjetwork-2.7.8.dist-info/METADATA,sha256=921fYXjv_44__3jjG8aZWt_iHE3I6rbNdHEar_vIDOw,244
+ pyerualjetwork-2.7.8.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+ pyerualjetwork-2.7.8.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+ pyerualjetwork-2.7.8.dist-info/RECORD,,
pyerualjetwork-2.7.8.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.38.4)
+ Generator: bdist_wheel (0.41.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

pyerualjetwork-2.5.8.dist-info/METADATA DELETED
@@ -1,8 +0,0 @@
- Metadata-Version: 2.1
- Name: pyerualjetwork
- Version: 2.5.8
- Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
- Author: Hasan Can Beydili
- Author-email: tchasancan@gmail.com
- Keywords: model evaluation,classifcation,pruning learning artficial neural networks
-
pyerualjetwork-2.5.8.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
- plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
- plan/plan.py,sha256=3jSj2QXjLhinm_5BH0-pVhHP6mHyrXzLKm4m5GaV6Yw,56167
- pyerualjetwork-2.5.8.dist-info/METADATA,sha256=ILdOu0T__HUunUBsrZcnxEjeyAzP_jl122hO-OVPR38,357
- pyerualjetwork-2.5.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-2.5.8.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
- pyerualjetwork-2.5.8.dist-info/RECORD,,