pyerualjetwork 2.8.4__py3-none-any.whl → 3.1__py3-none-any.whl

This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
plan/__init__.py CHANGED
@@ -2,4 +2,4 @@
 
 # This file is the main entry point of the plan module.
 
-from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization, plot_decision_boundry
+from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization, plot_decision_space, plot_decision_boundary
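Net effect on the public API: the misspelled plot_decision_boundry is gone, the old PCA scatter plot survives under the new name plot_decision_space, and plot_decision_boundary now names a true mesh-grid boundary plot (defined in the plan.py hunks below). A hedged usage sketch — the data variables are illustrative, not from the diff, and x needs at least two feature columns:

    from plan import fit, plot_decision_space, plot_decision_boundary

    W = fit(x_train, y_train)                              # x_train/y_train: your own arrays
    plot_decision_space(x_train, y_train, y_preds=None)    # old behaviour, new name
    plot_decision_boundary(x_train, y_train, [None], W)    # new mesh-grid boundary plot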
plan/plan.py CHANGED
@@ -15,6 +15,9 @@ import matplotlib.pyplot as plt
 import seaborn as sns
 from tqdm import tqdm
 from scipy.spatial import ConvexHull
+from datetime import datetime
+from scipy import io
+import scipy.io as sio
 
 # BUILD -----
 
@@ -24,11 +27,12 @@ def fit(
     y_train: List[Union[int, float]], # At least two.. and one hot encoded
     val= None,
     val_count = None,
-    activation_potentiation=None, # (float): Input activation_potentiation (optional)
+    activation_potentiation=[None], # (float): Input activation_potentiation (optional)
     x_val= None,
     y_val= None,
     show_training = None,
-    show_count= None
+    show_count= None, # [DISABLED]
+    visible_layer=None # For future [DISABLED]
 ) -> str:
 
     infoPLAN = """
@@ -40,15 +44,19 @@ def fit(
     val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
     val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
     activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
-    x_val (list[num]): List of validation data. (optional) Default: 1% of x_train (auto_balanced) it means every %1 of train progress starts validation
-    y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: 1% of y_train (auto_balanced) it means every %1 of train progress starts validation
+    x_val (list[num]): List of validation data. (optional) Default: %10 of x_train (auto_balanced) it means every %1 of train progress starts validation
+    y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: %10 of y_train (auto_balanced) it means every %1 of train progress starts validation
     show_training (bool, str): True, None or'final'
-    show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
+    show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional) [DISABLED]
+    visible_layer: For future [DISABLED]
+
     Returns:
     list([num]): (Weight matrices list, train_predictions list, Train_acc).
     error handled ?: Process status ('e')
     """
 
+    visible_layer = None
+
     if len(x_train) != len(y_train):
 
         print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
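Taken together, these two hunks change activation_potentiation from a single float threshold into a list of activation-function names, and reserve (but immediately disable) show_count and visible_layer. A minimal sketch of a 3.1-style call, assuming data already one-hot encoded with encode_one_hot; the variable names are illustrative:

    W = fit(x_train, y_train,
            val=True, x_val=x_val, y_val=y_val,
            activation_potentiation=['tanh', 'relu'])   # names dispatched in the new fex() below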
@@ -102,16 +110,37 @@ def fit(
     class_count = list(class_count)
 
     y_train = [tuple(sublist) for sublist in y_train]
+
+    if visible_layer == None:
 
-    neurons = [len(class_count), len(class_count)]
-    layers = ['fex']
+        layers = ['fex']
+    else:
+
+        layers = ['fex'] * visible_layer
 
     x_train[0] = np.array(x_train[0])
     x_train[0] = x_train[0].ravel()
     x_train_size = len(x_train[0])
+
+    if visible_layer == None:
+        STPW = [None]
+        STPW[0] = np.ones((len(class_count), x_train_size)) # STPW = SHORT TIME POTENTIATION WEIGHT
+
+    else:
+        if visible_layer == 1:
+            fex_count = visible_layer
+        else:
+            fex_count = visible_layer - 1
 
-    STPW = weight_identification(
-        len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
+        fex_neurons = [None] * fex_count
+
+        for i in range(fex_count):
+            fex_neurons[i] = [x_train_size]
+
+        cat_neurons = [len(class_count), x_train_size]
+
+        STPW = weight_identification(
+            len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
 
     LTPW = [0] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT
 
@@ -119,6 +148,8 @@ def fit(
 
     train_progress = tqdm(total=len(x_train),leave=False, desc="Training",ncols= 120)
 
+    max_w = len(STPW) - 1
+
     for index, inp in enumerate(x_train):
 
         progress = index / len(x_train) * 100
@@ -133,12 +164,14 @@ def fit(
 
         neural_layer = inp
 
-        for Lindex, Layer in enumerate(layers):
+        for Lindex, Layer in enumerate(STPW):
 
-            if Layer == 'fex':
-                STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)
-
-        STPW = normalization(STPW)
+
+            STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation, index=Lindex, max_w=max_w)
+
+
+        for i in range(len(STPW)):
+            STPW[i] = normalization(STPW[i])
 
         for i, w in enumerate(STPW):
             LTPW[i] = LTPW[i] + w
@@ -146,85 +179,84 @@ def fit(
 
         if val == True:
 
-            validation_model = evaluate(x_val, y_val, LTPW,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
-
-            val_acc = validation_model[get_acc()]
-            val_preds = validation_model[get_preds()]
-
-            plt.clf()
-
-            plot_decision_boundry(x_val, y_val, y_preds=val_preds, s=100, color='tab20')
-
-            plt.pause(0.0001)
-
-            val_list.append(val_acc)
-
-            if v_iter == 0:
-
-                val_bar.update(val_acc)
-                pass
-
-            if v_iter != 0:
-
-                val_acc = val_acc - val_list[v_iter - 1]
-                val_bar.update(val_acc)
-
-            v_iter += 1
+            try:
 
-        if show_training == True:
+                if round(progress) % 10 == 1:
 
-            if index %show_count == 0:
+                    validation_model = evaluate(x_val, y_val, LTPW ,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
 
-                if index != 0:
-                    plt.close(fig)
+                    val_acc = validation_model[get_acc()]
+
+                    plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)
 
-                if row != 0:
-
-                    fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+                    plt.pause(0.0001)
 
-                else:
-
-                    fig, ax = plt.subplots(1, 1, figsize=(18, 14))
+                    plt.clf()
+
+                    val_list.append(val_acc)
+
+                    if v_iter == 0:
 
-                for j in range(len(class_count)):
+                        val_bar.update(val_acc)
 
-                    if row != 0:
-
-                        mat = LTPW[0][j,:].reshape(row, col)
-                        suptitle_info = 'Neurons Learning Progress: % '
-                        title_info = f'{j+1}. Neuron'
+                    if v_iter != 0:
+
+                        val_acc = val_acc - val_list[v_iter - 1]
+                        val_bar.update(val_acc)
 
-                        mat = LTPW[0][j,:].reshape(row, col)
-
-                        ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+                    v_iter += 1
+            except:
+                pass
 
-                        ax[j].set_aspect('equal')
+        if show_training == True:
+            if index == 0:
+                if row != 0:
+
+                    ax = plt.subplots(1, len(class_count), figsize=(18, 14))
 
-                        ax[j].set_xticks([])
-                        ax[j].set_yticks([])
-                        ax[j].set_title(title_info)
+            if round(progress) % 2 == 1:
+
+                for j in range(len(class_count)):
 
-                    else:
-
-                        mat = LTPW[0]
-                        ax.imshow(mat, interpolation='sinc', cmap='viridis')
-                        suptitle_info = 'Weight Learning Progress: % '
-                        title_info = 'Weight Matrix Of Fex Layer'
+                    if row != 0:
 
+                        mat = LTPW[0][j,:].reshape(row, col)
+                        suptitle_info = 'Neurons Learning Progress: % '
+                        title_info = f'{j+1}. Neuron'
+
+                        mat = LTPW[0][j,:].reshape(row, col)
+
+                        ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+                        ax[j].set_aspect('equal')
+
+                        ax[j].set_xticks([])
+                        ax[j].set_yticks([])
+                        ax[j].set_title(title_info)
+
+                    else:
+
+                        mat = LTPW[0]
+                        plt.imshow(mat, interpolation='sinc', cmap='viridis')
+                        suptitle_info = 'Weight Learning Progress: % '
+                        title_info = 'Weight Matrix Of Fex Layer'
+
+                progress_status = f"{progress:.1f}"
+                plt.title(suptitle_info + progress_status)
+                plt.draw()
+                plt.pause(0.0001)
+                plt.clf()
 
-        progress_status = f"{progress:.1f}"
-        fig.suptitle(suptitle_info + progress_status)
-        plt.draw()
-        plt.pause(0.1)
-
-        STPW = weight_identification(
-            len(layers) - 1, len(class_count), neurons, x_train_size)
+        if visible_layer == None:
+            STPW = [None]
+            STPW[0] = np.ones((len(class_count), x_train_size)) # STPW = SHORT TIME POTENTIATION WEIGHT
+
+        else:
+            STPW = weight_identification(
+                len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)
 
         train_progress.update(1)
 
@@ -246,26 +278,23 @@ def fit(
             progress_status = f"{progress:.1f}"
             fig.suptitle('Neurons Learning Progress: % ' + progress_status)
             plt.draw()
-            plt.pause(0.1)
-
+            plt.pause(0.0001)
 
     if val == 'final':
 
         validation_model = evaluate(x_val, y_val, LTPW,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
 
         val_acc = validation_model[get_acc()]
-        val_preds = validation_model[get_preds()]
-
 
-        plot_decision_boundry(x_val, y_val, y_preds=val_preds, s=100, color='tab20')
+        plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)
 
         val_list.append(val_acc)
 
         val_bar.update(val_acc)
 
-
-    LTPW = normalization(LTPW)
-
+    for i in range(len(LTPW)):
+        LTPW[i] = normalization(LTPW[i])
+
     return LTPW
 
 # FUNCTIONS -----
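normalization() is now applied to each weight matrix separately rather than to the whole Python list of matrices, which max-abs scaling cannot handle cleanly. A toy check of the max-abs scaling that normalization() performs (matrix values made up):

    import numpy as np
    w = np.array([[2.0, -4.0], [1.0, 0.5]])
    print(w / np.max(np.abs(w)))   # -> [[ 0.5   -1.   ], [ 0.25   0.125]]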
@@ -327,7 +356,8 @@ def weight_normalization(
 def weight_identification(
     layer_count, # int: Number of layers in the neural network.
     class_count, # int: Number of classes in the classification task.
-    neurons, # list[num]: List of neuron counts for each layer.
+    fex_neurons,
+    cat_neurons, # list[num]: List of neuron counts for each layer.
     x_train_size # int: Size of the input data.
 ) -> str:
     """
@@ -343,90 +373,46 @@ def weight_identification(
     list([numpy_arrays],[...]): pretrained weight matices of the model. .
     """
 
-    Wlen = layer_count + 1
-    W = [None] * Wlen
-    W[0] = np.ones((neurons[0], x_train_size))
-    ws = layer_count - 1
-    for w in range(ws):
-        W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
-
-    return W
-
-
-def fex(
-    Input, # list[num]: Input data.
-    w, # num: Weight matrix of the neural network.
-    is_training, # bool: Flag indicating if the function is called during training (True or False).
-    Class, # int: Which class is, if training.
-    activation_potentiation # float or None: Input activation potentiation (optional)
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
+    W = [None] * (len(fex_neurons) + 1)
 
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        activation_potentiation (float or None): Threshold value for comparison. (optional)
+    for i in range(len(fex_neurons)):
+        W[i] = np.ones((fex_neurons[i]))
 
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-    """
+    W[i + 1] = np.ones((cat_neurons[0], cat_neurons[1]))
 
-    if is_training == True and activation_potentiation == None:
-
-        w[Class, :] = Input
+    return W
 
-        return w
+# ACTIVATION FUNCTIONS -----
 
-    elif is_training == True and activation_potentiation != None:
-
-
-        Input[Input < activation_potentiation] = 0
-        Input[Input > activation_potentiation] = 1
-
-        w[Class,:] = Input
-
-        return w
-
-    elif is_training == False and activation_potentiation == None:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-
-    elif is_training == False and activation_potentiation != None:
-
-        Input[Input < activation_potentiation] = 0
-        Input[Input > activation_potentiation] = 1
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
+def tanh(x):
+    return np.tanh(x)
 
+def swish(x):
+    return x * (1 / (1 + np.exp(-x)))
 
+def circular_activation(x):
+    return (np.sin(x) + 1) / 2
 
-def normalization(
-    Input # num: Input data to be normalized.
-):
-    """
-    Normalizes the input data using maximum absolute scaling.
+def modular_circular_activation(x, period=2*np.pi):
+    return np.mod(x, period) / period
 
-    Args:
-        Input (num): Input data to be normalized.
+def tanh_circular_activation(x):
+    return (np.tanh(x) + 1) / 2
 
-    Returns:
-        (num) Scaled input data after normalization.
-    """
+def leaky_relu(x, alpha=0.01):
+    return np.where(x > 0, x, alpha * x)
 
-    AbsVector = np.abs(Input)
+def softplus(x):
+    return np.log(1 + np.exp(x))
 
-    MaxAbs = np.max(AbsVector)
+def elu(x, alpha=1.0):
+    return np.where(x > 0, x, alpha * (np.exp(x) - 1))
 
-    ScaledInput = Input / MaxAbs
+def gelu(x):
+    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
 
-    return ScaledInput
+def selu(x, lambda_=1.0507, alpha=1.6733):
+    return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
 
 
 def Softmax(
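The old float-threshold logic in fex is replaced by this family of named, pure-NumPy activations (fex and normalization themselves move below Relu in the next hunk). They are easy to sanity-check on a toy vector (input made up, outputs rounded):

    import numpy as np
    x = np.array([-1.0, 0.0, 1.0])
    swish(x)                     # [-0.2689,  0.    ,  0.7311]
    leaky_relu(x)                # [-0.01  ,  0.    ,  1.    ]
    tanh_circular_activation(x)  # [ 0.1192,  0.5   ,  0.8808]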
@@ -476,11 +462,138 @@ def Relu(
     return np.maximum(0, x)
 
 
+def fex(
+    Input, # list[num]: Input data.
+    w, # num: Weight matrix of the neural network.
+    is_training, # bool: Flag indicating if the function is called during training (True or False).
+    Class, # int: Which class is, if training.
+    activation_potentiation,
+    index,
+    max_w # float or None: Input activation potentiation (optional)
+) -> tuple:
+    """
+    Applies feature extraction process to the input data using synaptic potentiation.
+
+    Args:
+        Input (num): Input data.
+        w (num): Weight matrix of the neural network.
+        is_training (bool): Flag indicating if the function is called during training (True or False).
+        Class (int): if is during training then which class(label) ? is isnt then put None.
+        activation_potentiation (float or None): Threshold value for comparison. (optional)
+
+    Returns:
+        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
+    """
+
+    Output = np.zeros(len(Input))
+
+    for activation in activation_potentiation:
+
+        if activation == 'sigmoid':
+            Output += Sigmoid(Input)
+
+        if activation == 'swish':
+            Output += swish(Input)
+
+        if activation == 'circular':
+            Output += circular_activation(Input)
+
+        if activation == 'mod_circular':
+            Output += modular_circular_activation(Input)
+
+        if activation == 'tanh_circular':
+            Output += tanh_circular_activation(Input)
+
+        if activation == 'leaky_relu':
+            Output += leaky_relu(Input)
+
+        if activation == 'relu':
+            Output += Relu(Input)
+
+        if activation == 'softplus':
+            Output += softplus(Input)
+
+        if activation == 'elu':
+            Output += elu(Input)
+
+        if activation == 'gelu':
+            Output += gelu(Input)
+
+        if activation == 'selu':
+            Output += selu(Input)
+
+        if activation == 'softmax':
+            Output += Softmax(Input)
+
+        if activation == 'tanh':
+            Output += tanh(Input)
+
+        if activation == None:
+            Output += Input
+
+    Input = Output
+
+    if is_training == True:
+
+        w[Class, :] = Input
+
+        return w
+
+
+    elif is_training == False:
+
+        neural_layer = np.dot(w, Input)
+
+        return neural_layer
+
+    elif is_training == False and max_w != 0:
+
+
+        if index == max_w:
+
+            neural_layer = np.dot(w, Input)
+            return neural_layer
+
+        else:
+
+            neural_layer = [None] * len(w)
+
+            for i in range(len(w)):
+
+                neural_layer[i] = Input[i] * w[i]
+
+            neural_layer = np.array(neural_layer)
+
+            return neural_layer
+
+
+def normalization(
+    Input # num: Input data to be normalized.
+):
+    """
+    Normalizes the input data using maximum absolute scaling.
+
+    Args:
+        Input (num): Input data to be normalized.
+
+    Returns:
+        (num) Scaled input data after normalization.
+    """
+
+    AbsVector = np.abs(Input)
+
+    MaxAbs = np.max(AbsVector)
+
+    ScaledInput = Input / MaxAbs
+
+    return ScaledInput
+
+
 def evaluate(
     x_test, # list[num]: Test input data.
     y_test, # list[num]: Test labels.
     W, # list[num]: Weight matrix list of the neural network.
-    activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+    activation_potentiation=[None], # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
     bar_status=True, # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
     show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
 ) -> tuple:
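The rewritten fex() first sums every selected activation of the input, then either stamps that vector into the class row (training) or multiplies it by the weights (inference). Note that the final "elif is_training == False and max_w != 0" branch can never run, because the plain "elif is_training == False" above it already catches every inference call. A minimal sketch of the two live paths, with made-up sizes:

    import numpy as np
    w = np.ones((3, 4))                                          # 3 classes, 4 features (toy)
    x = np.array([0.2, 0.8, 0.5, 0.1])
    w = fex(x, w, True, 1, ['tanh'], index=0, max_w=0)           # training: row 1 becomes tanh(x)
    scores = fex(x, w, False, None, ['tanh'], index=0, max_w=0)  # inference: w @ tanh(x)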
@@ -503,14 +616,17 @@ def evaluate(
     real_classes = []
     predict_classes = []
 
+    layer_count = len(W)
+
     try:
-        layers = ['fex']
+        layers = ['fex'] * layer_count
 
         Wc = [0] * len(W) # Wc = Weight copy
         true = 0
         y_preds = []
         acc_list = []
-
+        max_w = len(W) - 1
+
         for i, w in enumerate(W):
             Wc[i] = np.copy(w)
@@ -520,22 +636,23 @@ def evaluate(
         test_progress = tqdm(total=len(x_test),leave=False, desc='Testing',ncols=120)
         acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=120)
 
 
         for inpIndex, Input in enumerate(x_test):
             Input = np.array(Input)
             Input = Input.ravel()
             neural_layer = Input
 
-            for index, Layer in enumerate(layers):
+            for index, Layer in enumerate(W):
 
-                if Layer == 'fex':
-                    neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
+
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation, index=index, max_w=max_w)
 
 
             for i, w in enumerate(Wc):
                 W[i] = np.copy(w)
 
             neural_layer = Softmax(neural_layer)
+
             max_value = max(neural_layer)
 
             predict_probabilitys.append(max_value)
@@ -564,12 +681,12 @@ def evaluate(
                 acc_bar.update(acc)
 
         if show_metrices == True:
-            plot_evaluate(x_test, y_test, y_preds, acc_list)
+            plot_evaluate(x_test, y_test, y_preds, acc_list, W=W, activation_potentiation=activation_potentiation)
 
 
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
-
+
     except:
 
         print(Fore.RED + 'ERROR:' + infoTestModel + Style.RESET_ALL)
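A hedged sketch of calling the updated evaluate(), with result indices retrieved through the get_acc()/get_preds() helpers re-exported in plan/__init__.py; the data variables are illustrative:

    model = evaluate(x_test, y_test, W,
                     activation_potentiation=['tanh'], show_metrices=True)
    test_acc = model[get_acc()]
    test_preds = model[get_preds()]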
@@ -712,7 +829,7 @@ def save_model(model_name,
                model_path,
                W,
                scaler_params=None,
-               activation_potentiation=None
+               activation_potentiation=[None]
                ):
 
     infosave_model = """
@@ -752,6 +869,7 @@ def save_model(model_name,
     NeuronCount = 0
     SynapseCount = 0
 
+
     try:
         for w in W:
             NeuronCount += np.shape(w)[0]
@@ -761,13 +879,21 @@ def save_model(model_name,
         print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" +
               infosave_model + Style.RESET_ALL)
         return 'e'
-    import pandas as pd
-    from datetime import datetime
-    from scipy import io
+
+    if scaler_params != None:
+
+        if len(scaler_params) > len(activation_potentiation):
+
+            activation_potentiation += ['']
+
+        elif len(activation_potentiation) > len(scaler_params):
+
+            for i in range(len(activation_potentiation) - len(scaler_params)):
+
+                scaler_params.append(' ')
 
     data = {'MODEL NAME': model_name,
             'MODEL TYPE': model_type,
-            'LAYERS': layers,
             'LAYER COUNT': len(layers),
             'CLASS COUNT': class_count,
             'NEURON COUNT': NeuronCount,
@@ -789,45 +915,42 @@ def save_model(model_name,
     except:
 
         print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" +
-              infosave_model + Style.RESET_ALL)
+            infosave_model + Style.RESET_ALL)
         return 'e'
+
     try:
 
         if weights_type == 'txt' and weights_format == 'd':
 
             for i, w in enumerate(W):
-                np.savetxt(model_path + model_name +
-                           str(i+1) + 'w.txt', w, fmt='%d')
+                np.savetxt(model_path + model_name + '_weights.txt', w, fmt='%d')
 
         if weights_type == 'txt' and weights_format == 'f':
 
             for i, w in enumerate(W):
-                np.savetxt(model_path + model_name +
-                           str(i+1) + 'w.txt', w, fmt='%f')
+                np.savetxt(model_path + model_name + '_weights.txt', w, fmt='%f')
 
         if weights_type == 'txt' and weights_format == 'raw':
 
             for i, w in enumerate(W):
-                np.savetxt(model_path + model_name + str(i+1) + 'w.txt', w)
+                np.savetxt(model_path + model_name + '_weights.txt', w)
 
         ###
 
         if weights_type == 'npy' and weights_format == 'd':
 
             for i, w in enumerate(W):
-                np.save(model_path + model_name +
-                        str(i+1) + 'w.npy', w.astype(int))
+                np.save(model_path + model_name + '_weights.npy', w.astype(int))
 
         if weights_type == 'npy' and weights_format == 'f':
 
             for i, w in enumerate(W):
-                np.save(model_path + model_name + str(i+1) +
-                        'w.npy', w, w.astype(float))
+                np.save(model_path + model_name + '_weights.npy', w, w.astype(float))
 
         if weights_type == 'npy' and weights_format == 'raw':
 
             for i, w in enumerate(W):
-                np.save(model_path + model_name + str(i+1) + 'w.npy', w)
+                np.save(model_path + model_name + '_weights.npy', w)
 
 
         ###
@@ -835,19 +958,19 @@ def save_model(model_name,
 
             for i, w in enumerate(W):
                 w = {'w': w.astype(int)}
-                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+                io.savemat(model_path + model_name + '_weights.mat', w)
 
         if weights_type == 'mat' and weights_format == 'f':
 
             for i, w in enumerate(W):
                 w = {'w': w.astype(float)}
-                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+                io.savemat(model_path + model_name + '_weights.mat', w)
 
         if weights_type == 'mat' and weights_format == 'raw':
 
             for i, w in enumerate(W):
                 w = {'w': w}
-                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+                io.savemat(model_path + model_name + '_weights.mat', w)
 
 
     except:
@@ -878,8 +1001,6 @@ def load_model(model_name,
     """
     pass
 
-    import scipy.io as sio
-
     try:
 
         df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
@@ -897,13 +1018,13 @@ def load_model(model_name,
 
     if WeightType == 'txt':
         for i in range(layer_count):
-            W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
+            W[i] = np.loadtxt(model_path + model_name + '_weights.txt')
     elif WeightType == 'npy':
         for i in range(layer_count):
-            W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
+            W[i] = np.load(model_path + model_name + '_weights.npy')
     elif WeightType == 'mat':
         for i in range(layer_count):
-            W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
+            W[i] = sio.loadmat(model_path + model_name + '_weights.mat')
     else:
         raise ValueError(
             Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
@@ -924,29 +1045,21 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, df = load_model(model_name, model_path)
 
-    activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
+    activation_potentiation = list(df['ACTIVATION POTENTIATION'])
 
-    if activation_potentiation != 'nan':
+    scaler_params = df['STANDARD SCALER'].tolist()
+
+    scaler_params = [item for item in scaler_params if item != ' ']
 
-        activation_potentiation = float(activation_potentiation)
-
-    else:
-
-        activation_potentiation = None
-
     try:
-
-        scaler_params = df['STANDARD SCALER'].tolist()
-
-
+
         scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
 
         Input = standard_scaler(None, Input, scaler_params)
-
+
     except:
 
         pass
 
 
     layers = ['fex']
@@ -957,12 +1070,11 @@ def predict_model_ssd(Input, model_name, model_path):
         neural_layer = Input
         neural_layer = np.array(neural_layer)
         neural_layer = neural_layer.ravel()
-        for index, Layer in enumerate(layers):
+        max_w = len(W) - 1
+        for index, Layer in enumerate(W):
 
-            if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
-            elif Layer == 'cat':
-                neural_layer = np.dot(W[index], neural_layer)
+            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation, index=index, max_w=max_w)
+
     except:
         print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
               infopredict_model_ssd + Style.RESET_ALL)
@@ -972,7 +1084,7 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None):
+def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[None]):
 
     infopredict_model_ram = """
     Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
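predict_model_ram now defaults activation_potentiation to [None], and the body hunk below turns the previously commented-out error handling back into a real try/except, so in-memory inference is again safe to call directly. A minimal sketch (the sample vector is made up; W is a weight list returned by fit or load_model):

    import numpy as np
    sample = np.random.rand(4)
    out = predict_model_ram(sample, W=W, activation_potentiation=['tanh'])
    predicted_class = np.argmax(out)   # out is 'e' instead if the shapes don't match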
@@ -999,25 +1111,26 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=None
     Wc = [0] * len(W)
     for i, w in enumerate(W):
         Wc[i] = np.copy(w)
-    #try:
-    neural_layer = Input
-    neural_layer = np.array(neural_layer)
-    neural_layer = neural_layer.ravel()
-    for index, Layer in enumerate(layers):
-
-        if Layer == 'fex':
-            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
-        elif Layer == 'cat':
-            neural_layer = np.dot(W[index], neural_layer)
-
-    """ except:
-        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
-              infopredict_model_ram + Style.RESET_ALL)
-        return 'e'"""
-    for i, w in enumerate(Wc):
-        W[i] = np.copy(w)
-    return neural_layer
+    try:
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
 
+        max_w = len(W) - 1
+
+        for index, Layer in enumerate(W):
+
+
+            neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation, index=index, max_w=max_w)
+
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+        return neural_layer
+
+    except:
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
+              infopredict_model_ram + Style.RESET_ALL)
+        return 'e'
 
 def auto_balancer(x_train, y_train):
 
@@ -1057,7 +1170,7 @@ def auto_balancer(x_train, y_train):
         BalancedInputs = [x_train[idx] for idx in BalancedIndices]
         BalancedLabels = [y_train[idx] for idx in BalancedIndices]
 
-        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)
+        print(Fore.GREEN + "All Data Succesfully Balanced from: " + str(len(x_train)
             ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
     except:
         print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
@@ -1147,6 +1260,7 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
 
             mean = np.mean(x_train, axis=0)
             std = np.std(x_train, axis=0)
+
             train_data_scaled = (x_train - mean) / std
             test_data_scaled = (x_test - mean) / std
 
@@ -1154,7 +1268,7 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
             test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
 
             scaler_params = [mean, std]
-
+
             return scaler_params, train_data_scaled, test_data_scaled
 
         if scaler_params == None and x_test == None:
@@ -1170,16 +1284,25 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
            return scaler_params, train_data_scaled
 
        if scaler_params != None:
-
-            test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
-            test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            try:
+
+                test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+                test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+            except:
+
+                test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+                test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
 
            return test_data_scaled
 
    except:
+
        print(
-            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)
-
+            Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler + Style.RESET_ALL)
+
+        return('e')
 
 def encode_one_hot(y_train, y_test):
     info_one_hot_encode = """
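Per the branches visible in these hunks, standard_scaler() has three calling modes; the new try/except merely repeats the same two lines and only adds a fallback path. A hedged summary of the modes (array variables illustrative):

    scaler_params, x_tr, x_te = standard_scaler(x_train, x_test)  # fit on train, scale both
    scaler_params, x_tr = standard_scaler(x_train)                # fit and scale train only
    x_te = standard_scaler(None, x_test, scaler_params)           # reuse stored [mean, std]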
@@ -1423,7 +1546,7 @@ def confusion_matrix(y_true, y_pred, class_count):
     return confusion
 
 
-def plot_evaluate(x_test, y_test, y_preds, acc_list):
+def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
 
 
     acc = acc_list[len(acc_list) - 1]
@@ -1524,51 +1647,67 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list):
     axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
     axs[0, 1].grid(True, axis='y', linestyle='--', alpha=0.7)
 
-
-    if x_test.shape[1] > 2:
+    feature_indices=[0, 1]
 
-        X_pca = pca(x_test, n_components=2)
-    else:
-        X_pca = x_test
-
-    y_test = decode_one_hot(y_test)
-    num_classes = len(np.unique(y_test))
+    h = .02
+    x_min, x_max = x_test[:, feature_indices[0]].min() - 1, x_test[:, feature_indices[0]].max() + 1
+    y_min, y_max = x_test[:, feature_indices[1]].min() - 1, x_test[:, feature_indices[1]].max() + 1
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
+                         np.arange(y_min, y_max, h))
+
+    grid = np.c_[xx.ravel(), yy.ravel()]
+    grid_full = np.zeros((grid.shape[0], x_test.shape[1]))
+    grid_full[:, feature_indices] = grid
 
+    Z = [None] * len(grid_full)
 
-    cmap = plt.get_cmap('tab20')
+    predict_progress = tqdm(total=len(grid_full),leave=False, desc="Predicts For Desicion Boundary",ncols= 120)
 
+    for i in range(len(grid_full)):
 
+        Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
+        predict_progress.update(1)
 
-    norm = plt.Normalize(vmin=0, vmax=num_classes - 1)
-
+    Z = np.array(Z)
+    Z = Z.reshape(xx.shape)
 
-    scatter = axs[1, 1].scatter(X_pca[:, 0], X_pca[:, 1], c=y_test, edgecolor='k', s=50, cmap=cmap, norm=norm)
-
+    axs[1,1].contourf(xx, yy, Z, alpha=0.8)
+    axs[1,1].scatter(x_test[:, feature_indices[0]], x_test[:, feature_indices[1]], c=decode_one_hot(y_test), edgecolors='k', marker='o', s=20, alpha=0.9)
+    axs[1,1].set_xlabel(f'Feature {0 + 1}')
+    axs[1,1].set_ylabel(f'Feature {1 + 1}')
+    axs[1,1].set_title('Decision Boundary')
 
-    for cls in range(num_classes):
-        class_points = []
+    plt.show()
 
-
-        for i in range(len(y_test)):
-            if y_preds[i] == cls:
-                class_points.append(X_pca[i])
-
-        class_points = np.array(class_points)
+def plot_decision_boundary(x, y, activation_potentiation, W):
 
-        if len(class_points) > 2:
-            hull = ConvexHull(class_points)
-            hull_points = class_points[hull.vertices]
-
+    feature_indices=[0, 1]
 
-            hull_points = np.vstack([hull_points, hull_points[0]])
-
+    h = .02  # mesh grid step size
+    x_min, x_max = x[:, feature_indices[0]].min() - 1, x[:, feature_indices[0]].max() + 1
+    y_min, y_max = x[:, feature_indices[1]].min() - 1, x[:, feature_indices[1]].max() + 1
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
+                         np.arange(y_min, y_max, h))
+
+    grid = np.c_[xx.ravel(), yy.ravel()]
+    grid_full = np.zeros((grid.shape[0], x.shape[1]))
+    grid_full[:, feature_indices] = grid
+
+    Z = [None] * len(grid_full)
 
-            axs[1, 1].fill(hull_points[:, 0], hull_points[:, 1], color=cmap(norm(cls)), alpha=0.3, edgecolor='k', label=f'Class {cls} Hull')
+    for i in range(len(grid_full)):
 
+        Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
 
-    axs[1, 1].set_title("Decision Boundry")
+    Z = np.array(Z)
+    Z = Z.reshape(xx.shape)
 
-    plt.show()
+    plt.contourf(xx, yy, Z, alpha=0.8)
+    plt.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+    plt.xlabel(f'Feature {0 + 1}')
+    plt.ylabel(f'Feature {1 + 1}')
+    plt.title('Decision Boundary')
+    plt.draw()
 
 def pca(X, n_components):
     """
@@ -1598,7 +1737,7 @@ def pca(X, n_components):
 
     return X_reduced
 
-def plot_decision_boundry(x, y, y_preds=None, s=100, color='tab20'):
+def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
 
     if x.shape[1] > 2:
 
pyerualjetwork-2.8.4.dist-info/METADATA → pyerualjetwork-3.1.dist-info/METADATA CHANGED
@@ -1,13 +1,8 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.8.4
-Summary: plot_decision_boundry function added
-Home-page: UNKNOWN
+Version: 3.1
+Summary: Deep PLAN Integreted. Version 3 document coming
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
-License: UNKNOWN
 Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
-Platform: UNKNOWN
-
-UNKNOWN
 
pyerualjetwork-3.1.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
+plan/plan.py,sha256=Q7W2o5T4fLpme5ZG0kRPL0EA5SXA1Ccui3UfHWxkBHI,60472
+pyerualjetwork-3.1.dist-info/METADATA,sha256=bqwcxFGQBi8b-0l8bAzDfIdrtUbZV-pIP-1yiwhETOo,272
+pyerualjetwork-3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-3.1.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+pyerualjetwork-3.1.dist-info/RECORD,,
pyerualjetwork-2.8.4.dist-info/WHEEL → pyerualjetwork-3.1.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.42.0)
+Generator: bdist_wheel (0.38.4)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
pyerualjetwork-2.8.4.dist-info/RECORD REMOVED
@@ -1,6 +0,0 @@
-plan/__init__.py,sha256=SLgk4uuvhVr3CjscpcqN2TQMGyAd4Pe8YBiXfl3brd8,525
-plan/plan.py,sha256=YzmhHtaEyH51NcnfZLsAtUDBClag_TFCjy67554ouQM,56261
-pyerualjetwork-2.8.4.dist-info/METADATA,sha256=K9L9_hqmgaEIWjPRmTmihY6Kllp1L5mOwiGtT9fU9o0,331
-pyerualjetwork-2.8.4.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-pyerualjetwork-2.8.4.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
-pyerualjetwork-2.8.4.dist-info/RECORD,,