pyerualjetwork 2.5.9__py3-none-any.whl → 2.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/plan.py +32 -50
- {pyerualjetwork-2.5.9.dist-info → pyerualjetwork-2.6.0.dist-info}/METADATA +1 -1
- pyerualjetwork-2.6.0.dist-info/RECORD +6 -0
- pyerualjetwork-2.5.9.dist-info/RECORD +0 -6
- {pyerualjetwork-2.5.9.dist-info → pyerualjetwork-2.6.0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-2.5.9.dist-info → pyerualjetwork-2.6.0.dist-info}/top_level.txt +0 -0
plan/plan.py
CHANGED
@@ -6,25 +6,6 @@ Created on Tue Jun 18 23:32:16 2024
|
|
6
6
|
@author: hasan
|
7
7
|
"""
|
8
8
|
|
9
|
-
# optic
|
10
|
-
|
11
|
-
# -*- coding: utf-8 -*-
|
12
|
-
"""
|
13
|
-
Created on Fri Jun 21 05:21:35 2024
|
14
|
-
|
15
|
-
@author: hasan
|
16
|
-
"""
|
17
|
-
|
18
|
-
# -*- coding: utf-8 -*-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
"""
|
23
|
-
Created on Thu Jun 12 00:00:00 2024
|
24
|
-
|
25
|
-
@author: hasan can beydili
|
26
|
-
"""
|
27
|
-
|
28
9
|
import pandas as pd
|
29
10
|
import numpy as np
|
30
11
|
import time
|
@@ -47,7 +28,7 @@ def fit(
|
|
47
28
|
val_count= None,
|
48
29
|
x_val= None,
|
49
30
|
y_val= None,
|
50
|
-
|
31
|
+
activation_potentiation=None # (float): Input activation_potentiation (optional)
|
51
32
|
) -> str:
|
52
33
|
|
53
34
|
infoPLAN = """
|
@@ -58,10 +39,11 @@ def fit(
|
|
58
39
|
y_train (list[num]): List of target labels. (one hot encoded)
|
59
40
|
show_training (bool, str): True, None or 'final'
|
60
41
|
show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
|
42
|
+
val (None or True): Use validation during the training process? None or True. Default: None
|
61
43
|
val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
|
62
44
|
x_val (list[num]): List of validation data. (optional) Default: x_train
|
63
45
|
y_val (list[num]): List of validation target labels. (one hot encoded) (optional) Default: y_train
|
64
|
-
|
46
|
+
activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
|
65
47
|
Returns:
|
66
48
|
list([num]): (Weight matrices list, train_predictions list, Train_acc).
|
67
49
|
error handled ?: Process status ('e')
|
@@ -143,7 +125,7 @@ def fit(
|
|
143
125
|
neural_layer = normalization(neural_layer)
|
144
126
|
|
145
127
|
if Layer == 'fex':
|
146
|
-
W[Lindex] = fex(neural_layer, W[Lindex], True, y[index],
|
128
|
+
W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potentiation)
|
147
129
|
|
148
130
|
for i, w in enumerate(W):
|
149
131
|
trained_W[i] = trained_W[i] + w
|
@@ -381,7 +363,7 @@ def fex(
|
|
381
363
|
w, # num: Weight matrix of the neural network.
|
382
364
|
is_training, # bool: Flag indicating if the function is called during training (True or False).
|
383
365
|
Class, # int: Which class is, if training.
|
384
|
-
|
366
|
+
activation_potentiation # float or None: Input activation potentiation (optional)
|
385
367
|
) -> tuple:
|
386
368
|
"""
|
387
369
|
Applies feature extraction process to the input data using synaptic pruning.
|
@@ -391,38 +373,38 @@ def fex(
|
|
391
373
|
w (num): Weight matrix of the neural network.
|
392
374
|
is_training (bool): Flag indicating if the function is called during training (True or False).
|
393
375
|
Class (int): If called during training, the class (label) being trained; if not during training, pass None.
|
394
|
-
|
376
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
395
377
|
|
396
378
|
Returns:
|
397
379
|
tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
|
398
380
|
"""
|
399
381
|
|
400
|
-
if is_training == True and
|
382
|
+
if is_training == True and activation_potentiation == None:
|
401
383
|
|
402
384
|
w[Class, :] = Input
|
403
385
|
|
404
386
|
return w
|
405
387
|
|
406
|
-
elif is_training == True and
|
388
|
+
elif is_training == True and activation_potentiation != None:
|
407
389
|
|
408
390
|
|
409
|
-
Input[Input <
|
410
|
-
Input[Input >
|
391
|
+
Input[Input < activation_potentiation] = 0
|
392
|
+
Input[Input > activation_potentiation] = 1
|
411
393
|
|
412
394
|
w[Class,:] = Input
|
413
395
|
|
414
396
|
return w
|
415
397
|
|
416
|
-
elif is_training == False and
|
398
|
+
elif is_training == False and activation_potentiation == None:
|
417
399
|
|
418
400
|
neural_layer = np.dot(w, Input)
|
419
401
|
|
420
402
|
return neural_layer
|
421
403
|
|
422
|
-
elif is_training == False and
|
404
|
+
elif is_training == False and activation_potentiation != None:
|
423
405
|
|
424
|
-
Input[Input <
|
425
|
-
Input[Input >
|
406
|
+
Input[Input < activation_potentiation] = 0
|
407
|
+
Input[Input > activation_potentiation] = 1
|
426
408
|
|
427
409
|
neural_layer = np.dot(w, Input)
|
428
410
|
|
@@ -504,7 +486,7 @@ def evaluate(
|
|
504
486
|
y_test, # list[num]: Test labels.
|
505
487
|
show_metrices, # show_metrices (bool): (True or False)
|
506
488
|
W, # list[num]: Weight matrix list of the neural network.
|
507
|
-
|
489
|
+
activation_potentiation=None # activation_potentiation (float or None): Threshold value for comparison. (optional)
|
508
490
|
) -> tuple:
|
509
491
|
infoTestModel = """
|
510
492
|
Tests the neural network model with the given test data.
|
@@ -514,7 +496,7 @@ def evaluate(
|
|
514
496
|
y_test (list[num]): Test labels.
|
515
497
|
show_metrices (bool): (True or False)
|
516
498
|
W (list[num]): Weight matrix list of the neural network.
|
517
|
-
|
499
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
518
500
|
|
519
501
|
Returns:
|
520
502
|
tuple: A tuple containing the predicted labels and the accuracy of the model.
|
@@ -545,7 +527,7 @@ def evaluate(
|
|
545
527
|
neural_layer = normalization(neural_layer)
|
546
528
|
|
547
529
|
if Layer == 'fex':
|
548
|
-
neural_layer = fex(neural_layer, W[index], False, None,
|
530
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
|
549
531
|
elif Layer == 'cat':
|
550
532
|
neural_layer = np.dot(W[index], neural_layer)
|
551
533
|
|
@@ -621,7 +603,7 @@ def multiple_evaluate(
|
|
621
603
|
y_test, # list[num]: Test labels.
|
622
604
|
show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
|
623
605
|
MW, # list[list[num]]: Weight matrix of the neural network.
|
624
|
-
|
606
|
+
activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
|
625
607
|
) -> tuple:
|
626
608
|
infoTestModel = """
|
627
609
|
Tests the neural network model with the given test data.
|
@@ -668,7 +650,7 @@ def multiple_evaluate(
|
|
668
650
|
neural_layer = normalization(neural_layer)
|
669
651
|
|
670
652
|
if Layer == 'fex':
|
671
|
-
neural_layer = fex(neural_layer, W[index], False, None,
|
653
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
|
672
654
|
elif Layer == 'cat':
|
673
655
|
neural_layer = np.dot(W[index], neural_layer)
|
674
656
|
|
@@ -738,7 +720,7 @@ def multiple_evaluate(
|
|
738
720
|
|
739
721
|
except:
|
740
722
|
|
741
|
-
print(Fore.RED + "ERROR: Testing model parameters like '
|
723
|
+
print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
|
742
724
|
return 'e'
|
743
725
|
|
744
726
|
return W, y_preds, acc
|
@@ -753,7 +735,7 @@ def save_model(model_name,
|
|
753
735
|
model_path,
|
754
736
|
scaler_params,
|
755
737
|
W,
|
756
|
-
|
738
|
+
activation_potentiation=None
|
757
739
|
):
|
758
740
|
|
759
741
|
infosave_model = """
|
@@ -769,7 +751,7 @@ def save_model(model_name,
|
|
769
751
|
model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
|
770
752
|
scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
|
771
753
|
W: Weights of the model.
|
772
|
-
|
754
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
773
755
|
|
774
756
|
Returns:
|
775
757
|
str: Message indicating if the model was saved successfully or encountered an error.
|
@@ -819,7 +801,7 @@ def save_model(model_name,
|
|
819
801
|
'WEIGHTS FORMAT': weights_format,
|
820
802
|
'MODEL PATH': model_path,
|
821
803
|
'STANDARD SCALER': scaler_params,
|
822
|
-
'ACTIVATION
|
804
|
+
'ACTIVATION POTENTIATION': activation_potentiation
|
823
805
|
}
|
824
806
|
try:
|
825
807
|
|
@@ -915,7 +897,7 @@ def load_model(model_name,
|
|
915
897
|
model_path (str): Path where the model is saved.
|
916
898
|
|
917
899
|
Returns:
|
918
|
-
lists: W(list[num]),
|
900
|
+
lists: W(list[num]), activation_potentiation, DataFrame of the model
|
919
901
|
"""
|
920
902
|
pass
|
921
903
|
|
@@ -965,15 +947,15 @@ def predict_model_ssd(Input, model_name, model_path):
|
|
965
947
|
"""
|
966
948
|
W, df = load_model(model_name, model_path)
|
967
949
|
|
968
|
-
|
950
|
+
activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
|
969
951
|
|
970
|
-
if
|
952
|
+
if activation_potentiation != 'nan':
|
971
953
|
|
972
|
-
|
954
|
+
activation_potentiation = float(activation_potentiation)
|
973
955
|
|
974
956
|
else:
|
975
957
|
|
976
|
-
|
958
|
+
activation_potentiation = None
|
977
959
|
|
978
960
|
try:
|
979
961
|
|
@@ -1003,7 +985,7 @@ def predict_model_ssd(Input, model_name, model_path):
|
|
1003
985
|
neural_layer = normalization(neural_layer)
|
1004
986
|
|
1005
987
|
if Layer == 'fex':
|
1006
|
-
neural_layer = fex(neural_layer, W[index], False, None,
|
988
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
|
1007
989
|
elif Layer == 'cat':
|
1008
990
|
neural_layer = np.dot(W[index], neural_layer)
|
1009
991
|
except:
|
@@ -1015,7 +997,7 @@ def predict_model_ssd(Input, model_name, model_path):
|
|
1015
997
|
return neural_layer
|
1016
998
|
|
1017
999
|
|
1018
|
-
def predict_model_ram(Input, scaler_params, W,
|
1000
|
+
def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
|
1019
1001
|
|
1020
1002
|
infopredict_model_ram = """
|
1021
1003
|
Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
|
@@ -1025,7 +1007,7 @@ def predict_model_ram(Input, scaler_params, W, activation_potential=None):
|
|
1025
1007
|
Input (list or ndarray): Input data for the model (single vector or single matrix).
|
1026
1008
|
scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
|
1027
1009
|
W (list of ndarrays): Weights of the model.
|
1028
|
-
|
1010
|
+
activation_potentiation (float or None): Threshold value for comparison. (optional)
|
1029
1011
|
|
1030
1012
|
Returns:
|
1031
1013
|
ndarray: Output from the model.
|
@@ -1049,7 +1031,7 @@ def predict_model_ram(Input, scaler_params, W, activation_potential=None):
|
|
1049
1031
|
neural_layer = normalization(neural_layer)
|
1050
1032
|
|
1051
1033
|
if Layer == 'fex':
|
1052
|
-
neural_layer = fex(neural_layer, W[index], False, None,
|
1034
|
+
neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
|
1053
1035
|
elif Layer == 'cat':
|
1054
1036
|
neural_layer = np.dot(W[index], neural_layer)
|
1055
1037
|
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: pyerualjetwork
|
3
|
-
Version: 2.
|
3
|
+
Version: 2.6.0
|
4
4
|
Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
|
5
5
|
Author: Hasan Can Beydili
|
6
6
|
Author-email: tchasancan@gmail.com
|
@@ -0,0 +1,6 @@
|
|
1
|
+
plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
|
2
|
+
plan/plan.py,sha256=fCd44dXAX2TbaBthlDGr32KJyqfN5E5iVPpvMuzjlZg,56157
|
3
|
+
pyerualjetwork-2.6.0.dist-info/METADATA,sha256=2ATlxVR785xcOYeU7HZMtdsk1w7CleaF8HhD9tobjvQ,357
|
4
|
+
pyerualjetwork-2.6.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
|
5
|
+
pyerualjetwork-2.6.0.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
|
6
|
+
pyerualjetwork-2.6.0.dist-info/RECORD,,
|
@@ -1,6 +0,0 @@
|
|
1
|
-
plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
|
2
|
-
plan/plan.py,sha256=IM0dEsGiooGbRUhrCR3JBlZQ7oF10tnMfUEQqUxtAyU,56167
|
3
|
-
pyerualjetwork-2.5.9.dist-info/METADATA,sha256=u8ePHpaRB3I5QO7bjK7iKR4ffVp6qtg1LN8WjLT_FcM,357
|
4
|
-
pyerualjetwork-2.5.9.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
|
5
|
-
pyerualjetwork-2.5.9.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
|
6
|
-
pyerualjetwork-2.5.9.dist-info/RECORD,,
|
File without changes
|
File without changes
|