pyerualjetwork 3.1__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -27,7 +27,7 @@ def fit(
  y_train: List[Union[int, float]], # At least two.. and one hot encoded
  val= None,
  val_count = None,
- activation_potentiation=[None], # (float): Input activation_potentiation (optional)
+ activation_potentiation=[None], # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
  x_val= None,
  y_val= None,
  show_training = None,
@@ -42,7 +42,7 @@ def fit(
  x_train (list[num]): List of input data.
  y_train (list[num]): List of target labels. (one hot encoded)
  val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
- val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
+ val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 10 (%10) (optional)
  activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
  x_val (list[num]): List of validation data. (optional) Default: %10 of x_train (auto_balanced) it means every %1 of train progress starts validation
  y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: %10 of y_train (auto_balanced) it means every %1 of train progress starts validation
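The signature and docstring changes above turn activation_potentiation into a list of activation names (a "deep PLAN" stack) and change val_count's default to 10. A minimal usage sketch, assuming plan/plan.py is imported as plan.plan and that fit still returns the trained weight list (neither is shown in this diff); the activation names used here appear in the dispatch shown further down:

    import numpy as np
    from plan import plan  # assumed import path for the plan/plan.py module in this wheel

    x_train = list(np.random.rand(20, 4))                    # 20 samples, 4 features
    y_train = list(np.eye(2)[np.random.randint(0, 2, 20)])   # one-hot labels, at least two classes

    W = plan.fit(x_train, y_train,
                 val=True,          # validate during training
                 val_count=10,      # new default: roughly every 10% of progress
                 activation_potentiation=['tanh', 'waveakt'])  # [None] means plain 'linear'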
@@ -80,16 +80,14 @@ def fit(

  if val_count == None:

- val_count = 0.01
+ val_count = 10

  v_iter = 0

  if val == 'final':

- val_count = 0.99
-
- val_count = int(len(x_train) * val_count)
- val_count_copy = val_count
+ val_count = 100
+
  val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
  val_list = [] * val_count

@@ -181,7 +179,7 @@ def fit(

  try:

- if round(progress) % 10 == 1:
+ if round(progress) % val_count == 1:


  validation_model = evaluate(x_val, y_val, LTPW ,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
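Together with the defaults above, this changes val_count from a fraction of the training set into a progress-percentage interval. A small sketch of the cadence the patched condition produces (assuming progress is tracked in percent, which the val == 'final' → 100 handling suggests):

    val_count = 10                              # new default
    for progress in range(101):                 # hypothetical training progress, in percent
        if round(progress) % val_count == 1:    # same test as the patched fit()
            print(f"validation pass at ~{progress}% progress")
    # fires at 1%, 11%, 21%, ..., 91% -- roughly every 10% of the run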
@@ -288,6 +286,8 @@ def fit(

  plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)

+ plt.show()
+
  val_list.append(val_acc)

  val_bar.update(val_acc)
@@ -384,37 +384,6 @@ def weight_identification(

  # ACTIVATION FUNCTIONS -----

- def tanh(x):
- return np.tanh(x)
-
- def swish(x):
- return x * (1 / (1 + np.exp(-x)))
-
- def circular_activation(x):
- return (np.sin(x) + 1) / 2
-
- def modular_circular_activation(x, period=2*np.pi):
- return np.mod(x, period) / period
-
- def tanh_circular_activation(x):
- return (np.tanh(x) + 1) / 2
-
- def leaky_relu(x, alpha=0.01):
- return np.where(x > 0, x, alpha * x)
-
- def softplus(x):
- return np.log(1 + np.exp(x))
-
- def elu(x, alpha=1.0):
- return np.where(x > 0, x, alpha * (np.exp(x) - 1))
-
- def gelu(x):
- return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
-
- def selu(x, lambda_=1.0507, alpha=1.6733):
- return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
-
-
  def Softmax(
  x # num: Input data to be transformed using softmax function.
  ):
@@ -461,15 +430,144 @@ def Relu(

  return np.maximum(0, x)

+ def tanh(x):
+ return np.tanh(x)
+
+ def swish(x):
+ return x * (1 / (1 + np.exp(-x)))
+
+ def circular_activation(x):
+ return (np.sin(x) + 1) / 2
+
+ def modular_circular_activation(x, period=2*np.pi):
+ return np.mod(x, period) / period
+
+ def tanh_circular_activation(x):
+ return (np.tanh(x) + 1) / 2
+
+ def leaky_relu(x, alpha=0.01):
+ return np.where(x > 0, x, alpha * x)
+
+ def softplus(x):
+ return np.log(1 + np.exp(x))
+
+ def elu(x, alpha=1.0):
+ return np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ def gelu(x):
+ return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+
+ def selu(x, lambda_=1.0507, alpha=1.6733):
+ return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ # 1. Sinusoids Activation (SinAkt)
+ def sinakt(x):
+ return np.sin(x) + np.cos(x)
+
+ # 2. Parametric Squared Activation (P-Squared)
+ def p_squared(x, alpha=1.0, beta=0.0):
+ return alpha * x**2 + beta * x
+
+ def sglu(x, alpha=1.0):
+ return softmax(alpha * x) * x
+
+ # 4. Double Leaky ReLU (DLReLU)
+ def dlrelu(x):
+ return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)
+
+ # 5. Exponential Sigmoid (ExSig)
+ def exsig(x):
+ return 1 / (1 + np.exp(-x**2))
+
+ # 6. Adaptive Cosine Activation (ACos)
+ def acos(x, alpha=1.0, beta=0.0):
+ return np.cos(alpha * x + beta)
+
+ # 7. Gaussian-like Activation (GLA)
+ def gla(x, alpha=1.0, mu=0.0):
+ return np.exp(-alpha * (x - mu)**2)
+
+ # 8. Swish ReLU (SReLU)
+ def srelu(x):
+ return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)
+
+ # 9. Quadratic Exponential Linear Unit (QELU)
+ def qelu(x):
+ return x**2 * np.exp(x) - 1
+
+ # 10. Inverse Square Root Activation (ISRA)
+ def isra(x):
+ return x / np.sqrt(np.abs(x) + 1)
+
+ def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
+ return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)
+
+ def arctan(x):
+ return np.arctan(x)
+
+ def bent_identity(x):
+ return (np.sqrt(x**2 + 1) - 1) / 2 + x
+
+ def sech(x):
+ return 2 / (np.exp(x) + np.exp(-x))
+
+ def softsign(x):
+ return x / (1 + np.abs(x))
+
+ def pwl(x, alpha=0.5, beta=1.5):
+ return np.where(x <= 0, alpha * x, beta * x)
+
+ def cubic(x):
+ return x**3
+
+ def gaussian(x, alpha=1.0, mu=0.0):
+ return np.exp(-alpha * (x - mu)**2)
+
+ def sine(x, alpha=1.0):
+ return np.sin(alpha * x)
+
+ def tanh_square(x):
+ return np.tanh(x)**2
+
+ def mod_sigmoid(x, alpha=1.0, beta=0.0):
+ return 1 / (1 + np.exp(-alpha * x + beta))
+
+ def quartic(x):
+ return x**4
+
+ def square_quartic(x):
+ return (x**2)**2
+
+ def cubic_quadratic(x):
+ return x**3 * (x**2)
+
+ def exp_cubic(x):
+ return np.exp(x**3)
+
+ def sine_square(x):
+ return np.sin(x)**2
+
+ def logarithmic(x):
+ return np.log(x**2 + 1)
+
+ def power(x, p):
+ return x**p
+
+ def scaled_cubic(x, alpha=1.0):
+ return alpha * x**3
+
+ def sine_offset(x, beta=0.0):
+ return np.sin(x + beta)
+

  def fex(
  Input, # list[num]: Input data.
  w, # num: Weight matrix of the neural network.
  is_training, # bool: Flag indicating if the function is called during training (True or False).
  Class, # int: Which class is, if training.
- activation_potentiation,
+ activation_potentiation, # (list): Activation potentiation list for deep PLAN. (optional)
  index,
- max_w # float or None: Input activation potentiation (optional)
+ max_w
  ) -> tuple:
  """
  Applies feature extraction process to the input data using synaptic potentiation.
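Version 3.2.0 moves the activation catalogue next to Relu and adds many new element-wise activations (sinakt, p_squared, dlrelu, exsig, isra, waveakt, and so on). A standalone NumPy sketch mirroring three of the definitions added above, only to show what they return on a sample vector:

    import numpy as np

    def sinakt(x):   # sin + cos, as defined in the hunk above
        return np.sin(x) + np.cos(x)

    def dlrelu(x):   # "double leaky ReLU" variant from the hunk above
        return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)

    def isra(x):     # inverse-square-root activation from the hunk above
        return x / np.sqrt(np.abs(x) + 1)

    x = np.array([-2.0, 0.0, 1.5])
    print(sinakt(x))   # approx [-1.325  1.     1.068]
    print(dlrelu(x))   # approx [-0.22   0.     1.515]
    print(isra(x))     # approx [-1.155  0.     0.949]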
@@ -479,10 +577,12 @@ def fex(
  w (num): Weight matrix of the neural network.
  is_training (bool): Flag indicating if the function is called during training (True or False).
  Class (int): if is during training then which class(label) ? is isnt then put None.
- activation_potentiation (float or None): Threshold value for comparison. (optional)
+ # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)

  Returns:
  tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
+ or
+ num: neural network output
  """

  Output = np.zeros(len(Input))
@@ -528,11 +628,100 @@ def fex(
  if activation == 'tanh':
  Output += tanh(Input)

- if activation == None:
+ if activation == 'sinakt':
+ Output += sinakt(Input)
+
+ if activation == 'p_squared':
+ Output += p_squared(Input)
+
+ if activation == 'sglu':
+ Output += sglu(Input, alpha=1.0)
+
+ if activation == 'dlrelu':
+ Output += dlrelu(Input)
+
+ if activation == 'exsig':
+ Output += exsig(Input)
+
+ if activation == 'acos':
+ Output += acos(Input, alpha=1.0, beta=0.0)
+
+ if activation == 'gla':
+ Output += gla(Input, alpha=1.0, mu=0.0)
+
+ if activation == 'srelu':
+ Output += srelu(Input)
+
+ if activation == 'qelu':
+ Output += qelu(Input)
+
+ if activation == 'isra':
+ Output += isra(Input)
+
+ if activation == 'waveakt':
+ Output += waveakt(Input)
+
+ if activation == 'arctan':
+ Output += arctan(Input)
+
+ if activation == 'bent_identity':
+ Output += bent_identity(Input)
+
+ if activation == 'sech':
+ Output += sech(Input)
+
+ if activation == 'softsign':
+ Output += softsign(Input)
+
+ if activation == 'pwl':
+ Output += pwl(Input)
+
+ if activation == 'cubic':
+ Output += cubic(Input)
+
+ if activation == 'gaussian':
+ Output += gaussian(Input)
+
+ if activation == 'sine':
+ Output += sine(Input)
+
+ if activation == 'tanh_square':
+ Output += tanh_square(Input)
+
+ if activation == 'mod_sigmoid':
+ Output += mod_sigmoid(Input)
+
+ if activation == None or activation == 'linear':
  Output += Input

+ if activation == 'quartic':
+ Output += quartic(Input)
+
+ if activation == 'square_quartic':
+ Output += square_quartic(Input)
+
+ if activation == 'cubic_quadratic':
+ Output += cubic_quadratic(Input)
+
+ if activation == 'exp_cubic':
+ Output += exp_cubic(Input)
+
+ if activation == 'sine_square':
+ Output += sine_square(Input)
+
+ if activation == 'logarithmic':
+ Output += logarithmic(Input)
+
+ if activation == 'scaled_cubic':
+ Output += scaled_cubic(Input, 1.0)
+
+ if activation == 'sine_offset':
+ Output += sine_offset(Input, 1.0)
+
+
  Input = Output

+
  if is_training == True:

  w[Class, :] = Input
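The dispatch above indicates how a list-valued activation_potentiation is consumed: every recognised name adds its activation of Input to a running Output, and the sum then replaces Input. The loop over the list is not visible in this hunk, so the following is a hedged reconstruction of that accumulation pattern rather than the library's exact code:

    import numpy as np

    def apply_activations(Input, activation_potentiation):
        table = {
            'tanh': np.tanh,
            'arctan': np.arctan,
            'softsign': lambda x: x / (1 + np.abs(x)),
            'linear': lambda x: x,
            None: lambda x: x,   # [None] behaves as 'linear'
        }
        Output = np.zeros(len(Input))
        for activation in activation_potentiation:
            Output += table[activation](Input)   # each entry contributes additively
        return Output

    print(apply_activations(np.array([0.5, -1.0]), ['tanh', None]))
    # approx [ 0.962 -1.762 ]  (tanh(x) + x)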
@@ -593,7 +782,7 @@ def evaluate(
  x_test, # list[num]: Test input data.
  y_test, # list[num]: Test labels.
  W, # list[num]: Weight matrix list of the neural network.
- activation_potentiation=[None], # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ activation_potentiation=[None], # (list): Activation potentiation list for deep PLAN. (optional)
  bar_status=True, # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
  ) -> tuple:
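Because evaluate now takes the same list, a model has to be tested with the activations it was trained with. A hedged sketch continuing the fit example above (the exact contents of the returned tuple are not detailed in this diff):

    x_test = list(np.random.rand(5, 4))
    y_test = list(np.eye(2)[np.random.randint(0, 2, 5)])

    test_results = plan.evaluate(x_test, y_test, W,
                                 activation_potentiation=['tanh', 'waveakt'],
                                 bar_status=True,
                                 show_metrices=None)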
@@ -604,7 +793,7 @@ def evaluate(
  x_test (list[num]): Test input data.
  y_test (list[num]): Test labels.
  W (list[num]): Weight matrix list of the neural network.
- activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
  bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices (bool): (True or None) (optional) Default: None

@@ -845,7 +1034,7 @@ def save_model(model_name,
  model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
  scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
  W: Weights of the model.
- activation_potentiation (float or None): Threshold value for comparison. (optional)
+ activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)

  Returns:
  str: Message indicating if the model was saved successfully or encountered an error.
@@ -898,7 +1087,7 @@ def save_model(model_name,
  'CLASS COUNT': class_count,
  'NEURON COUNT': NeuronCount,
  'SYNAPSE COUNT': SynapseCount,
- 'TEST ACCURACY': test_acc,
+ 'TEST ACCURACY': float(test_acc),
  'SAVE DATE': datetime.now(),
  'WEIGHTS TYPE': weights_type,
  'WEIGHTS FORMAT': weights_format,
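Wrapping test_acc in float() most likely normalises a NumPy scalar to a built-in Python float before it lands in the saved metadata; the diff does not state the reason, so treat this as an inference. The difference it removes:

    import numpy as np

    test_acc = np.float64(0.95)     # what a NumPy-computed accuracy typically is
    print(type(test_acc))           # <class 'numpy.float64'>
    print(type(float(test_acc)))    # <class 'float'> -- plain Python float, serialises anywhere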
@@ -1093,8 +1282,8 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[Non
  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
  W (list of ndarrays): Weights of the model.
- scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
- activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+ activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)

  Returns:
  ndarray: Output from the model.
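predict_model_ram keeps its calling convention but now documents scaler_params as a list and activation_potentiation as the activation-name list. A hedged single-sample sketch, reusing W and the imports from the fit example and passing the same activations the model was trained with:

    sample = np.random.rand(4)      # one input vector with the training feature count
    output = plan.predict_model_ram(sample, W,
                                    scaler_params=None,   # no standard scaler was used
                                    activation_potentiation=['tanh', 'waveakt'])
    print(output)                    # raw network output (ndarray)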
@@ -1780,7 +1969,7 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):

  plt.fill(hull_points[:, 0], hull_points[:, 1], color=cmap(norm(cls)), alpha=0.3, edgecolor='k', label=f'Class {cls} Hull')

- plt.title("Decision Boundry")
+ plt.title("Decision Space (Data Distribution)")

  plt.draw()

pyerualjetwork-3.1.dist-info/METADATA → pyerualjetwork-3.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 3.1
+ Version: 3.2.0
  Summary: Deep PLAN Integreted. Version 3 document coming
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
pyerualjetwork-3.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
+ plan/plan.py,sha256=ASQIHk-atzX48iH7RfyqSRo4rO2YyDKqLudo3l_9d78,65066
+ pyerualjetwork-3.2.0.dist-info/METADATA,sha256=HhmOci9_X7tjMcPKFw4ZMKOj3-BM70RQNNg569CJBtU,274
+ pyerualjetwork-3.2.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-3.2.0.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+ pyerualjetwork-3.2.0.dist-info/RECORD,,
pyerualjetwork-3.1.dist-info/RECORD REMOVED
@@ -1,6 +0,0 @@
- plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
- plan/plan.py,sha256=Q7W2o5T4fLpme5ZG0kRPL0EA5SXA1Ccui3UfHWxkBHI,60472
- pyerualjetwork-3.1.dist-info/METADATA,sha256=bqwcxFGQBi8b-0l8bAzDfIdrtUbZV-pIP-1yiwhETOo,272
- pyerualjetwork-3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-3.1.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
- pyerualjetwork-3.1.dist-info/RECORD,,