pyerualjetwork 2.5.9__tar.gz → 2.6.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/PKG-INFO +1 -1
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/plan/plan.py +54 -67
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/pyerualjetwork.egg-info/PKG-INFO +1 -1
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/setup.py +1 -1
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/plan/__init__.py +0 -0
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-2.5.9 → pyerualjetwork-2.6.3}/setup.cfg +0 -0
--- pyerualjetwork-2.5.9/PKG-INFO
+++ pyerualjetwork-2.6.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.5.9
+Version: 2.6.3
 Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
--- pyerualjetwork-2.5.9/plan/plan.py
+++ pyerualjetwork-2.6.3/plan/plan.py
@@ -6,25 +6,6 @@ Created on Tue Jun 18 23:32:16 2024
 @author: hasan
 """
 
-# optic
-
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Jun 21 05:21:35 2024
-
-@author: hasan
-"""
-
-# -*- coding: utf-8 -*-
-
-
-
-"""
-Created on Thu Jun 12 00:00:00 2024
-
-@author: hasan can beydili
-"""
-
 import pandas as pd
 import numpy as np
 import time
@@ -47,7 +28,7 @@ def fit(
     val_count= None,
     x_val= None,
     y_val= None,
-    activation_potential=None # (float): Input activation_potential (optional)
+    activation_potentiation=None # (float): Input activation_potentiation (optional)
 ) -> str:
 
     infoPLAN = """
@@ -58,10 +39,11 @@ def fit(
     y_train (list[num]): List of target labels. (one hot encoded)
     show_training (bool, str): True, None or 'final'
     show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
+    val (None or True): validation in training process ? None or True Default: None
     val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
     x_val (list[num]): List of validation data. (optional) Default: x_train
     y_val (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
-    activation_potential (float): Input activation potential (for binary injection) (optional) in range: -1, 1
+    activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
     Returns:
     list([num]): (Weight matrices list, train_predictions list, Train_acc).
     error handled ?: Process status ('e')
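Taken together, the new fit options can be exercised as below. This is a hedged sketch, not code from the package: the `from plan import plan` import path, the keyword usage, and the three-value unpacking are assumptions based only on the signature and docstring shown in the hunks above.

```python
# Illustrative sketch only; assumes the module exposes fit() as documented above.
from plan import plan
import numpy as np

x_train = np.random.rand(100, 4)                   # 100 samples, 4 features
y_train = np.eye(2)[np.random.randint(0, 2, 100)]  # one-hot encoded labels

# New 2.6.x options: val/val_count turn on periodic validation, and
# activation_potentiation sets the binary-injection threshold.
W, train_preds, train_acc = plan.fit(
    x_train, y_train,
    show_training=None,
    val=True,                     # validate during training
    val_count=0.1,                # after every 10% of the examples
    activation_potentiation=0.5,  # threshold, documented range: -1, 1
)
```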
@@ -81,15 +63,13 @@ def fit(
     if val == True and val_count == None:
 
         val_count = 0.1
-        val_count_copy = val_count
 
-    if val == True:
+    if val == True and val_count != None:
 
         val_count = int(len(x_train) * val_count)
-
         val_count_copy = val_count
-
-    if
+
+    if show_count == None:
 
         show_count = 10
 
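For reference, the fractional default translates into an absolute example count exactly as the hunk above computes it (standalone arithmetic with an invented training-set size):

```python
x_train_len = 1000                        # hypothetical number of training examples
val_count = 0.1                           # default: 10% of the data
val_count = int(x_train_len * val_count)  # -> 100: accuracy test every 100 examples
```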
@@ -143,7 +123,7 @@ def fit(
     neural_layer = normalization(neural_layer)
 
     if Layer == 'fex':
-        W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
+        W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potentiation)
 
     for i, w in enumerate(W):
         trained_W[i] = trained_W[i] + w
@@ -153,6 +133,13 @@ def fit(
 
     if index == val_count:
 
+        if show_training == True:
+            try:
+                plt.close(fig)
+
+            except:
+                pass
+
         val_count += val_count_copy
 
 layers.append('cat')
@@ -221,14 +208,14 @@ def fit(
     suptitle_info = 'Weight Learning Progress: % '
     title_info = 'Weight Matrix Of Fex Layer'
 
-
-
+
+
 
-
-
-
-
-
+    progress_status = f"{progress:.1f}"
+    fig.suptitle(suptitle_info + progress_status)
+    plt.draw()
+    plt.pause(0.1)
+
 
 W = weight_identification(
     len(layers) - 1, len(class_count), neurons, x_train_size)
@@ -381,48 +368,48 @@ def fex(
     w, # num: Weight matrix of the neural network.
     is_training, # bool: Flag indicating if the function is called during training (True or False).
     Class, # int: Which class is, if training.
-    activation_potential # float or None: Input activation potential (optional)
+    activation_potentiation # float or None: Input activation potentiation (optional)
 ) -> tuple:
     """
-    Applies feature extraction process to the input data using synaptic
+    Applies feature extraction process to the input data using synaptic potentiation.
 
     Args:
     Input (num): Input data.
     w (num): Weight matrix of the neural network.
     is_training (bool): Flag indicating if the function is called during training (True or False).
     Class (int): if is during training then which class(label) ? is isnt then put None.
-    activation_potential (float or None): Threshold value for comparison. (optional)
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
     tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
 
-    if is_training == True and activation_potential == None:
+    if is_training == True and activation_potentiation == None:
 
         w[Class, :] = Input
 
         return w
 
-    elif is_training == True and activation_potential != None:
+    elif is_training == True and activation_potentiation != None:
 
 
-        Input[Input < activation_potential] = 0
-        Input[Input > activation_potential] = 1
+        Input[Input < activation_potentiation] = 0
+        Input[Input > activation_potentiation] = 1
 
         w[Class,:] = Input
 
         return w
 
-    elif is_training == False and activation_potential == None:
+    elif is_training == False and activation_potentiation == None:
 
         neural_layer = np.dot(w, Input)
 
         return neural_layer
 
-    elif is_training == False and activation_potential != None:
+    elif is_training == False and activation_potentiation != None:
 
-        Input[Input < activation_potential] = 0
-        Input[Input > activation_potential] = 1
+        Input[Input < activation_potentiation] = 0
+        Input[Input > activation_potentiation] = 1
 
         neural_layer = np.dot(w, Input)
 
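The thresholding branches above reduce to a two-mask binarization. A standalone numpy illustration with invented values:

```python
import numpy as np

Input = np.array([0.2, 0.5, 0.9, -0.3])
activation_potentiation = 0.5

# Same masks as the fex branches above: values below the threshold drop to 0,
# values above it saturate to 1. A value exactly equal to the threshold is
# matched by neither mask and passes through unchanged.
Input[Input < activation_potentiation] = 0
Input[Input > activation_potentiation] = 1
print(Input)  # [0.  0.5 1.  0. ]
```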
@@ -504,7 +491,7 @@ def evaluate(
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): (True or False)
     W, # list[num]: Weight matrix list of the neural network.
-    activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
+    activation_potentiation=None # activation_potentiation (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -514,7 +501,7 @@ def evaluate(
     y_test (list[num]): Test labels.
     show_metrices (bool): (True or False)
     W (list[num]): Weight matrix list of the neural network.
-    activation_potential (float or None): Threshold value for comparison. (optional)
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
     tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -545,7 +532,7 @@ def evaluate(
     neural_layer = normalization(neural_layer)
 
     if Layer == 'fex':
-        neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+        neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
     elif Layer == 'cat':
         neural_layer = np.dot(W[index], neural_layer)
 
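As the error message further below spells out, the threshold passed to evaluate has to match the one used at training time. A hedged sketch; the return-value unpacking is an assumption based on the docstrings above:

```python
# The same activation_potentiation value must flow through fit and evaluate.
W, _, _ = plan.fit(x_train, y_train, activation_potentiation=0.5)
y_preds, acc = plan.evaluate(x_test, y_test, show_metrices=False, W=W,
                             activation_potentiation=0.5)  # same value as fit
```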
@@ -621,7 +608,7 @@ def multiple_evaluate(
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
     MW, # list[list[num]]: Weight matrix of the neural network.
-    activation_potential=None # (float or None): Threshold value for comparison. (optional)
+    activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -668,7 +655,7 @@ def multiple_evaluate(
     neural_layer = normalization(neural_layer)
 
     if Layer == 'fex':
-        neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+        neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
     elif Layer == 'cat':
         neural_layer = np.dot(W[index], neural_layer)
 
@@ -738,7 +725,7 @@ def multiple_evaluate(
 
     except:
 
-        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
         return 'e'
 
     return W, y_preds, acc
@@ -753,11 +740,11 @@ def save_model(model_name,
     model_path,
     scaler_params,
     W,
-    activation_potential=None
+    activation_potentiation=None
 ):
 
     infosave_model = """
-    Function to save a
+    Function to save a potentiation learning model.
 
     Arguments:
     model_name (str): Name of the model.
@@ -769,7 +756,7 @@ def save_model(model_name,
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
     scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W: Weights of the model.
-    activation_potential (float or None): Threshold value for comparison. (optional)
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
     str: Message indicating if the model was saved successfully or encountered an error.
@@ -819,7 +806,7 @@ def save_model(model_name,
     'WEIGHTS FORMAT': weights_format,
     'MODEL PATH': model_path,
     'STANDARD SCALER': scaler_params,
-    'ACTIVATION POTENTIAL': activation_potential
+    'ACTIVATION POTENTIATION': activation_potentiation
 }
 try:
 
@@ -908,14 +895,14 @@ def load_model(model_name,
     model_path,
 ):
     infoload_model = """
-    Function to load a
+    Function to load a potentiation learning model.
 
     Arguments:
     model_name (str): Name of the model.
     model_path (str): Path where the model is saved.
 
     Returns:
-    lists: W(list[num]),
+    lists: W(list[num]), activation_potentiation, DataFrame of the model
     """
     pass
 
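A hedged persistence round trip: save_model has parameters this diff never shows (for example the weights format), so only the ones visible in the hunks are passed, by keyword; the two-value unpacking of load_model follows the predict_model_ssd hunk below rather than the docstring.

```python
# Sketch only: argument names besides the ones shown in this diff are unknown.
plan.save_model(model_name='my_plan_model',
                model_path='models/',
                scaler_params=None,           # None when no standard scaler was used
                W=W,
                activation_potentiation=0.5)  # stored under 'ACTIVATION POTENTIATION'

W_loaded, df = plan.load_model('my_plan_model', 'models/')
```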
@@ -955,7 +942,7 @@ def load_model(model_name,
 def predict_model_ssd(Input, model_name, model_path):
 
     infopredict_model_ssd = """
-    Function to make a prediction using a divided
+    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
 
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
@@ -965,15 +952,15 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, df = load_model(model_name, model_path)
 
-    activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
+    activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
 
-    if activation_potential != 'nan':
+    if activation_potentiation != 'nan':
 
-        activation_potential = float(activation_potential)
+        activation_potentiation = float(activation_potentiation)
 
     else:
 
-        activation_potential = None
+        activation_potentiation = None
 
     try:
 
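The 'nan' comparison works because a model saved with activation_potentiation=None comes back from pandas as NaN, and str() of NaN is the literal string 'nan'. A standalone illustration:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'ACTIVATION POTENTIATION': [np.nan]})  # saved without a threshold
raw = str(df['ACTIVATION POTENTIATION'].iloc[0])          # -> 'nan'
threshold = float(raw) if raw != 'nan' else None          # restored as None
assert threshold is None
```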
@@ -1003,7 +990,7 @@ def predict_model_ssd(Input, model_name, model_path):
     neural_layer = normalization(neural_layer)
 
     if Layer == 'fex':
-        neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+        neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
     elif Layer == 'cat':
         neural_layer = np.dot(W[index], neural_layer)
 except:
@@ -1015,17 +1002,17 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, scaler_params, W, activation_potential=None):
+def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
 
     infopredict_model_ram = """
-    Function to make a prediction using a divided
+    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
     from weights and parameters stored in memory.
 
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
     scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W (list of ndarrays): Weights of the model.
-    activation_potential (float or None): Threshold value for comparison. (optional)
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
     ndarray: Output from the model.
@@ -1049,7 +1036,7 @@ def predict_model_ram(Input, scaler_params, W, activation_potential=None):
     neural_layer = normalization(neural_layer)
 
     if Layer == 'fex':
-        neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
+        neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
     elif Layer == 'cat':
         neural_layer = np.dot(W[index], neural_layer)
 
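predict_model_ram skips the disk round trip and reuses weights already in memory; again the threshold should be the training-time value. A hedged usage sketch (the argmax decoding is an assumption, not something shown in this diff):

```python
sample = x_test[0]
out = plan.predict_model_ram(sample,
                             scaler_params=None,  # no standard scaler was used
                             W=W,
                             activation_potentiation=0.5)
prediction = np.argmax(out)  # index of the most strongly activated class
```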
@@ -1695,4 +1682,4 @@ def get_preds():
 
 def get_acc():
 
-    return 2
+    return 2
--- pyerualjetwork-2.5.9/pyerualjetwork.egg-info/PKG-INFO
+++ pyerualjetwork-2.6.3/pyerualjetwork.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.5.9
+Version: 2.6.3
 Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
--- pyerualjetwork-2.5.9/setup.py
+++ pyerualjetwork-2.6.3/setup.py
@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 setup(
 
     name = "pyerualjetwork",
-    version = "2.5.9",
+    version = "2.6.3",
    author = "Hasan Can Beydili",
    author_email = "tchasancan@gmail.com",
    description= "New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document",
The remaining files (plan/__init__.py, pyerualjetwork.egg-info/SOURCES.txt, dependency_links.txt, top_level.txt, and setup.cfg) are unchanged between the two versions.
|