pyerualjetwork 2.2.0__tar.gz → 2.2.2__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries, and is provided for informational purposes only.
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/PKG-INFO +1 -1
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/plan_bi/plan_bi.py +6 -6
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/plan_di/plan_di.py +7 -51
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/pyerualjetwork.egg-info/PKG-INFO +1 -1
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/setup.py +1 -1
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/plan_bi/__init__.py +0 -0
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/plan_di/__init__.py +0 -0
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-2.2.0 → pyerualjetwork-2.2.2}/setup.cfg +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.2.0
+Version: 2.2.2
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
plan_bi/plan_bi.py

@@ -95,8 +95,6 @@ def fit(
 
             if Layer == 'fex':
                 W[Lindex] = fex(neural_layer, W[Lindex], activation_potential, Piece[Windex], True, y)
-            elif Layer == 'cat':
-                W[Lindex] = cat(neural_layer, W[Lindex], True, y)
 
 
         for i, w in enumerate(W):
@@ -172,6 +170,8 @@ def weight_identification(
     for w in range(ws):
         W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
     W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
+    W[len(W) - 1] = np.eye(class_count)
+
     return W
 
 def synaptic_pruning(
@@ -471,7 +471,7 @@ def evaluate(
             if layers[index] == 'fex':
                 neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
             if layers[index] == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
 
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
@@ -594,7 +594,7 @@ def multiple_evaluate(
             if layers[index] == 'fex':
                 neural_layer = fex(neural_layer, W[index], activation_potentials[m], None, False, None)
             if layers[index] == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
 
             output_layer += neural_layer
 
@@ -914,7 +914,7 @@ def predict_model_ssd(Input, model_name, model_path):
             if layers[index] == 'fex':
                 neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
             if layers[index] == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
     except:
         print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
         return 'e'
@@ -958,7 +958,7 @@ def predict_model_ram(Input, activation_potential, scaler, W):
             if layers[index] == 'fex':
                 neural_layer = fex(neural_layer, W[index], activation_potential, None, False, None)
             if layers[index] == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
 
     except:
         print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
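Taken together, the plan_bi hunks above replace the trainable 'cat' layer with a fixed one: weight_identification now appends W[len(W) - 1] = np.eye(class_count), and the evaluate/predict paths compute the final layer as a plain np.dot. The sketch below illustrates that final step with toy values; the variable names are chosen here for illustration and are not part of the library API.

```python
import numpy as np

# Toy illustration of the 2.2.2 'cat' step in plan_bi (values and names are
# illustrative, not the library's own variables).
class_count = 3
W_last = np.eye(class_count)              # weight_identification now fixes the last matrix to the identity
neural_layer = np.array([0.2, 1.5, 0.7])  # output of the preceding 'fex' layer

# Replaces the removed cat() call in evaluate()/predict_model_*:
output_layer = np.dot(W_last, neural_layer)
print(output_layer)  # equals neural_layer while W_last stays the identity
```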
plan_di/plan_di.py

@@ -58,7 +58,7 @@ def fit(
 
     W = weight_identification(
         len(layers) - 1, len(class_count), neurons, x_train_size)
-
+
     trained_W = [1] * len(W)
     print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
     start_time = time.time()
@@ -72,19 +72,6 @@ def fit(
               infoPLAN + Style.RESET_ALL)
         return 'e'
 
-        for Ulindex, Ul in enumerate(class_count):
-
-            if Ul == y_train[index]:
-                for Windex, w in enumerate(W):
-                    for i, ul in enumerate(Ul):
-                        if ul == 1.0:
-                            k = i
-
-                    cs = Divides[int(k)][Windex][0]
-
-                    W[Windex] = synaptic_pruning(w, cs, 'row', int(
-                        k), len(class_count), Piece[Windex], True)
-
         neural_layer = inp
 
         for Lindex, Layer in enumerate(layers):
@@ -94,8 +81,6 @@ def fit(
             y = np.argmax(y_train[index])
             if Layer == 'fex':
                 W[Lindex] = fex(neural_layer, W[Lindex], True, y)
-            elif Layer == 'cat':
-                W[Lindex] = cat(neural_layer, W[Lindex], True, y)
 
         for i, w in enumerate(W):
             trained_W[i] = trained_W[i] + w
@@ -165,6 +150,8 @@ def weight_identification(
     for w in range(ws):
         W[w + 1] = np.ones((neurons[w + 1], neurons[w]))
     W[layer_count] = np.ones((class_count, neurons[layer_count - 1]))
+    W[len(W) - 1] = np.eye(class_count)
+
     return W
 
 
@@ -302,37 +289,6 @@ def fex(
     return neural_layer
 
 
-def cat(
-    Input,               # list[num]: Input data.
-    w,                   # list[num]: Weight matrix of the neural network.
-    # (bool): Flag indicating if the function is called during training (True or False).
-    is_training,
-    Class
-) -> tuple:
-    """
-    Applies categorization process to the input data using synaptic pruning if specified.
-
-    Args:
-        Input (list[num]): Input data.
-        w (list[num]): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-    Returns:
-        tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
-    """
-
-    if is_training == True:
-
-        w[Class, Class] += 1
-
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
-
 
 def normalization(
     Input  # num: Input data to be normalized.
@@ -449,7 +405,7 @@ def evaluate(
             if Layer == 'fex':
                 neural_layer = fex(neural_layer, W[index], False, None)
             elif Layer == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
 
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
@@ -571,7 +527,7 @@ def multiple_evaluate(
             if Layer == 'fex':
                 neural_layer = fex(neural_layer, W[index], False, None)
             elif Layer == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
 
             output_layer += neural_layer
 
@@ -887,7 +843,7 @@ def predict_model_ssd(Input, model_name, model_path):
             if Layer == 'fex':
                 neural_layer = fex(neural_layer, W[index], False, None)
             elif Layer == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
     except:
         print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" +
               infopredict_model_ssd + Style.RESET_ALL)
@@ -932,7 +888,7 @@ def predict_model_ram(Input, scaler, W):
             if Layer == 'fex':
                 neural_layer = fex(neural_layer, W[index], False, None)
             elif Layer == 'cat':
-                neural_layer =
+                neural_layer = np.dot(W[index], neural_layer)
 
     except:
         print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." +
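In plan_di the cat() helper is deleted outright: its inference branch was just np.dot(w, Input), which evaluate, multiple_evaluate, and the predict functions now inline, and its training branch (w[Class, Class] += 1) becomes unnecessary once the last weight matrix is pinned to np.eye(class_count). Below is a rough before/after comparison reconstructed from the removed lines above; the names and values are hypothetical, not library API.

```python
import numpy as np

def cat_old(Input, w, is_training, Class):
    """Behaviour of the deleted cat() helper, reconstructed from the removed lines above."""
    if is_training:
        w[Class, Class] += 1       # training: bump the diagonal entry of the true class
        return w
    return np.dot(w, Input)        # inference: plain matrix-vector product

class_count = 3
W_last = np.eye(class_count)       # 2.2.2: weight_identification fixes the last matrix to the identity
x = np.array([0.1, 0.9, 0.3])

# The old inference path and the new inlined np.dot give the same result
# for an identity weight matrix.
assert np.allclose(cat_old(x, W_last.copy(), False, None),
                   np.dot(W_last, x))
```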
pyerualjetwork.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 2.2.0
+Version: 2.2.2
 Summary: 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
setup.py

@@ -5,7 +5,7 @@ from setuptools import setup, find_packages
 setup(
 
     name = "pyerualjetwork",
-    version = "2.2.0",
+    version = "2.2.2",
     author = "Hasan Can Beydili",
     author_email = "tchasancan@gmail.com",
     description= " 8 new functions: multiple_evaluate , encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate And Code improvements (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)",