pyerualjetwork 2.0.5__tar.gz → 2.0.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork-2.0.7/PKG-INFO +7 -0
- pyerualjetwork-2.0.7/plan_bi/__init__.py +5 -0
- pyerualjetwork-2.0.5/plan/plan.py → pyerualjetwork-2.0.7/plan_bi/plan_bi.py +27 -1
- pyerualjetwork-2.0.7/plan_di/__init__.py +5 -0
- pyerualjetwork-2.0.7/plan_di/plan_di.py +994 -0
- pyerualjetwork-2.0.7/pyerualjetwork.egg-info/PKG-INFO +7 -0
- {pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/pyerualjetwork.egg-info/SOURCES.txt +4 -2
- pyerualjetwork-2.0.7/pyerualjetwork.egg-info/top_level.txt +2 -0
- {pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/setup.py +2 -2
- pyerualjetwork-2.0.5/PKG-INFO +0 -7
- pyerualjetwork-2.0.5/plan/__init__.py +0 -5
- pyerualjetwork-2.0.5/pyerualjetwork.egg-info/PKG-INFO +0 -7
- pyerualjetwork-2.0.5/pyerualjetwork.egg-info/top_level.txt +0 -1
- {pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/setup.cfg +0 -0
pyerualjetwork-2.0.7/PKG-INFO
ADDED
@@ -0,0 +1,7 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.0.7
+Summary: Advanced python deep learning library. New features: BINARY INJECTION (OLD) NOW ADDED NEW DIRECT FEATURE INJECTION. AND 'standard_scaler' func. Important Note: If there are any data smaller than 0 among the input data of the entry model, import plan_bi; otherwise, import plan_di. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.0.7/plan_bi/__init__.py
ADDED
@@ -0,0 +1,5 @@
+# pyerualjetwork/PLAN/__init__.py
+
+# This file is the main entry point of the plan module.
+
+from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler
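The Summary above explains the split this release introduces: plan_bi carries the old binary-injection pipeline, plan_di the new direct-feature-injection one, and inputs containing values below 0 require plan_bi. A minimal selection sketch, with invented sample data and assuming the 2.0.7 package is installed:

import numpy as np

x_train = [np.array([0.2, -0.4, 0.9]), np.array([0.1, 0.3, 0.5])]

# Per the 2.0.7 Summary: any input value below 0 means plan_bi,
# otherwise plan_di.
if any(np.min(sample) < 0 for sample in x_train):
    import plan_bi as plan
else:
    import plan_di as plan

Both packages export the same names (compare this __init__.py with plan_di's below), so code written against one can switch to the other by changing only the import.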
pyerualjetwork-2.0.5/plan/plan.py → pyerualjetwork-2.0.7/plan_bi/plan_bi.py
@@ -324,7 +324,7 @@ def fex(
     is_training # num: 1 or 0
 ) -> tuple:
     """
-    Applies feature
+    Applies feature extraction process to the input data using synaptic pruning.
 
     Args:
         Input (list[num]): Input data.
@@ -956,6 +956,32 @@ def synthetic_augmentation(x, y, class_count):
 
     return np.array(x_balanced), np.array(y_balanced)
 
+def standard_scaler(x_train, x_test):
+    info_standard_scaler = """
+    Standardizes training and test datasets.
+
+    Args:
+        train_data: numpy.ndarray
+            Training data (n_samples, n_features)
+        test_data: numpy.ndarray
+            Test data (n_samples, n_features)
+
+    Returns:
+        tuple
+            Standardized training and test datasets
+    """
+    try:
+        mean = np.mean(x_train, axis=0)
+        std = np.std(x_train, axis=0)
+
+
+        train_data_scaled = (x_train - mean) / std
+        test_data_scaled = (x_test - mean) / std
+
+    except:
+        print(Fore.RED + "ERROR: x_train and x_test must be numpy array from standard_scaler" + info_standard_scaler)
+
+    return train_data_scaled, test_data_scaled
 
 def get_weights():
 
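The new standard_scaler computes the mean and standard deviation on the training set only and applies them to both sets, which avoids leaking test statistics into the scaler. A usage sketch with invented data, assuming 2.0.7 is installed:

import numpy as np
from plan_bi import standard_scaler  # plan_di exports the same function

x_train = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
x_test = np.array([[2.5, 25.0]])

# Both arrays are scaled with the training mean/std.
x_train_s, x_test_s = standard_scaler(x_train, x_test)
print(x_train_s.mean(axis=0))  # ~0 per feature
print(x_train_s.std(axis=0))   # ~1 per feature

Two caveats visible in the code above: if the inputs are not numpy arrays, the except branch only prints and the final return then raises NameError (train_data_scaled is never assigned), and a zero-variance feature produces a division by zero.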
pyerualjetwork-2.0.7/plan_di/__init__.py
ADDED
@@ -0,0 +1,5 @@
+# pyerualjetwork/PLAN/__init__.py
+
+# This file is the main entry point of the plan module.
+
+from .plan_di import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler
pyerualjetwork-2.0.7/plan_di/plan_di.py
ADDED
@@ -0,0 +1,994 @@
+"""
+Created on Thu Jun 12 00:00:00 2024
+
+@author: hasan can beydili
+"""
+import numpy as np
+import time
+from colorama import Fore,Style
+from typing import List, Union
+import math
+from scipy.special import expit, softmax
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+# BUILD -----
+def fit(
+    x_train: List[Union[int, float]],
+    y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
+) -> str:
+
+    infoPLAN = """
+    Creates and configures a PLAN model.
+
+    Args:
+        x_train (list[num]): List of input data.
+        y_train (list[num]): List of y_train. (one hot encoded)
+        activation_potential (float): Input activation potential
+
+    Returns:
+        list([num]): (Weight matrices list, train_predictions list, Train_acc).
+        error handled ?: Process status ('e')
+    """
+
+    if len(x_train) != len(y_train):
+        print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit",infoPLAN)
+        return 'e'
+
+    class_count = set()
+    for sublist in y_train:
+
+        class_count.add(tuple(sublist))
+
+
+    class_count = list(class_count)
+
+    y_train = [tuple(sublist) for sublist in y_train]
+
+    neurons = [len(class_count),len(class_count)]
+    layers = ['fex','cat']
+
+    x_train[0] = np.array(x_train[0])
+    x_train[0] = x_train[0].ravel()
+    x_train_size = len(x_train[0])
+
+    W = weight_identification(len(layers) - 1,len(class_count),neurons,x_train_size)
+    Divides, Piece = synaptic_dividing(len(class_count),W)
+    trained_W = [1] * len(W)
+    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
+    train_predictions = [None] * len(y_train)
+    true = 0
+    start_time = time.time()
+    for index, inp in enumerate(x_train):
+        uni_start_time = time.time()
+        inp = np.array(inp)
+        inp = inp.ravel()
+
+        if x_train_size != len(inp):
+            print(Fore.RED +"ERROR304: All input matrices or vectors in x_train list, must be same size. from: fit",infoPLAN + Style.RESET_ALL)
+            return 'e'
+
+
+        for Ulindex, Ul in enumerate(class_count):
+
+            if Ul == y_train[index]:
+                for Windex, w in enumerate(W):
+                    for i, ul in enumerate(Ul):
+                        if ul == 1.0:
+                            k = i
+
+                    cs = Divides[int(k)][Windex][0]
+
+
+                    W[Windex] = synaptic_pruning(w, cs, 'row', int(k),len(class_count),Piece[Windex],1)
+
+        neural_layer = inp
+
+        for Lindex, Layer in enumerate(layers):
+
+
+            # neural_layer = normalization(neural_layer)
+
+
+            if Layer == 'fex':
+                y = np.argmax(y_train[index])
+
+                neural_layer,W[Lindex] = fex(neural_layer, W[Lindex], y , 1)
+            elif Layer == 'cat':
+                neural_layer,W[Lindex] = cat(neural_layer, W[Lindex], 1, Piece[Windex])
+
+        RealOutput = np.argmax(y_train[index])
+        PredictedOutput = np.argmax(neural_layer)
+        if RealOutput == PredictedOutput:
+            true += 1
+        acc = true / len(y_train)
+        train_predictions[index] = PredictedOutput
+
+        for i, w in enumerate(W):
+            trained_W[i] = trained_W[i] + w
+
+
+        W = weight_identification(len(layers) - 1, len(class_count), neurons, x_train_size)
+
+
+        uni_end_time = time.time()
+
+        calculating_est = round((uni_end_time - uni_start_time) * (len(x_train) - index),3)
+
+        if calculating_est < 60:
+            print('\rest......(sec):',calculating_est,'\n',end= "")
+            print('\rTrain accuracy: ' ,acc ,"\n", end="")
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('\rest......(min):',calculating_est/60,'\n',end= "")
+            print('\rTrain accuracy: ' ,acc ,"\n", end="")
+
+        elif calculating_est > 3600:
+            print('\rest......(h):',calculating_est/3600,'\n',end= "")
+            print('\rTrain accuracy: ' ,acc ,"\n", end="")
+
+    EndTime = time.time()
+
+    calculating_est = round(EndTime - start_time,2)
+
+    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")
+
+    if calculating_est < 60:
+        print('Total training time(sec): ',calculating_est)
+
+    elif calculating_est > 60 and calculating_est < 3600:
+        print('Total training time(min): ',calculating_est/60)
+
+    elif calculating_est > 3600:
+        print('Total training time(h): ',calculating_est/3600)
+
+    if acc > 0.8:
+        print(Fore.GREEN + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
+
+    elif acc < 0.8 and acc > 0.6:
+        print(Fore.MAGENTA + '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
+
+    elif acc < 0.6:
+        print(Fore.RED+ '\nTotal Train accuracy: ' ,acc, '\n',Style.RESET_ALL)
+
+
+
+
+    return trained_W,train_predictions,acc
+
+# FUNCTIONS -----
+
+def weight_identification(
+    layer_count, # int: Number of layers in the neural network.
+    class_count, # int: Number of classes in the classification task.
+    neurons, # list[num]: List of neuron counts for each layer.
+    x_train_size # int: Size of the input data.
+) -> str:
+    """
+    Identifies the weights for a neural network model.
+
+    Args:
+        layer_count (int): Number of layers in the neural network.
+        class_count (int): Number of classes in the classification task.
+        neurons (list[num]): List of neuron counts for each layer.
+        x_train_size (int): Size of the input data.
+
+    Returns:
+        list([numpy_arrays],[...]): Weight matices of the model. .
+    """
+
+
+    Wlen = layer_count + 1
+    W = [None] * Wlen
+    W[0] = np.ones((neurons[0],x_train_size))
+    ws = layer_count - 1
+    for w in range(ws):
+        W[w + 1] = np.ones((neurons[w + 1],neurons[w]))
+    W[layer_count] = np.ones((class_count,neurons[layer_count - 1]))
+    return W
+
+def synaptic_pruning(
+    w, # list[list[num]]: Weight matrix of the neural network.
+    cs, # list[list[num]]: Synaptic connections between neurons.
+    key, # int: key for identifying synaptic connections.
+    Class, # int: Class label for the current training instance.
+    class_count, # int: Total number of classes in the dataset.
+    piece, # ???
+    is_training # int: 1 or 0
+
+) -> str:
+    infoPruning = """
+    Performs synaptic pruning in a neural network model.
+
+    Args:
+        w (list[list[num]]): Weight matrix of the neural network.
+        cs (list[list[num]]): Synaptic connections between neurons.
+        key (str): key for identifying synaptic row or col connections.
+        Class (int): Class label for the current training instance.
+        class_count (int): Total number of classes in the dataset.
+
+    Returns:
+        numpy array: Weight matrix.
+    """
+
+
+    Class += 1 # because index start 0
+
+    if Class != 1:
+
+
+
+        ce = cs / Class
+
+        if is_training == 1:
+
+            p = piece
+
+            for i in range(Class - 3):
+
+                piece+=p
+
+            if Class!= 2:
+                ce += piece
+
+            w[int(ce)-1::-1,:] = 0
+
+
+        w[cs:,:] = 0
+
+    else:
+
+        if Class == 1:
+            if key == 'row':
+
+                w[cs:,:] = 0
+
+            elif key == 'col':
+
+                w[:,cs] = 0
+
+            else:
+                print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning)
+                return 'e'
+        else:
+            if key == 'row':
+
+                w[cs:,:] = 0
+
+                ce = int(round(w.shape[0] - cs / class_count))
+                w[ce-1::-1,:] = 0
+
+            elif key == 'col':
+
+                w[:,cs] = 0
+
+            else:
+                print(Fore.RED + "ERROR103: synaptic_pruning func's key parameter must be 'row' or 'col' from: synaptic_pruning" + infoPruning + Style.RESET_ALL)
+                return 'e'
+    return w
+
+def synaptic_dividing(
+    class_count, # int: Total number of classes in the dataset.
+    W # list[list[num]]: Weight matrix of the neural network.
+) -> str:
+    """
+    Divides the synaptic weights of a neural network model based on class count.
+
+    Args:
+        class_count (int): Total number of classes in the dataset.
+        W (list[list[num]]): Weight matrix of the neural network.
+
+    Returns:
+        list: a 3D list holds informations of divided net.
+    """
+
+
+    Piece = [1] * len(W)
+    #print('Piece:' + Piece)
+    #input()
+    # Create an empty three-dimensional list
+    Divides = [[[0] for _ in range(len(W))] for _ in range(class_count)]
+
+
+    for i in range(len(W)):
+
+
+        Piece[i] = int(math.floor(W[i].shape[0] / class_count))
+
+    cs = 0
+    # j = Classes, i = Weights, [0] = CutStart.
+
+    for i in range(len(W)):
+        for j in range(class_count):
+            cs = cs + Piece[i]
+            Divides[j][i][0] = cs
+            #pruning_param[i] = cs
+            #print('Divides: ' + j + i + ' = ' + Divides[j][i][0])
+            #input()
+
+        j = 0
+        cs = 0
+
+    return Divides, Piece
+
+
+def fex(
+    Input, # list[num]: Input data.
+    w, # list[list[num]]: Weight matrix of the neural network.,
+    Class, # Which class is, if training. num
+    is_training # num: 1 or 0
+) -> tuple:
+    """
+    Applies feature extraction process to the input data using synaptic pruning.
+
+    Args:
+        Input (list[num]): Input data.
+        w (list[list[num]]): Weight matrix of the neural network.
+        ACTIVATION_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+        activation_potential (num): Threshold value for comparison.
+
+    Returns:
+        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
+    """
+
+    if is_training == 1:
+
+        w[Class,:] = Input
+
+    neural_layer = np.dot(w, Input)
+
+    return neural_layer,w
+
+def cat(
+    Input, # list[num]: Input data.
+    w, # list[list[num]]: Weight matrix of the neural network.
+    isTrain,
+    piece # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
+) -> tuple:
+    """
+    Applies categorization process to the input data using synaptic pruning if specified.
+
+    Args:
+        Input (list[num]): Input data.
+        w (list[list[num]]): Weight matrix of the neural network.
+        ACTIVATION_threshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+        activation_potential (num): Threshold value for comparison.
+        isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
+
+    Returns:
+        tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
+    """
+
+    PruneIndex = np.where(Input == 0)
+
+    if isTrain == 1:
+
+        w = synaptic_pruning(w, PruneIndex, 'col', 0, 0, piece, isTrain)
+
+
+    neural_layer = np.dot(w, Input)
+
+    return neural_layer,w
+
+
+def normalization(
+    Input # list[num]: Input data to be normalized.
+):
+    """
+    Normalizes the input data using maximum absolute scaling.
+
+    Args:
+        Input (list[num]): Input data to be normalized.
+
+    Returns:
+        list[num]: Scaled input data after normalization.
+    """
+
+
+    AbsVector = np.abs(Input)
+
+    MaxAbs = np.max(AbsVector)
+
+    ScaledInput = Input / MaxAbs
+
+    return ScaledInput
+
+
+def Softmax(
+    x # list[num]: Input data to be transformed using softmax function.
+):
+    """
+    Applies the softmax function to the input data.
+
+    Args:
+        x (list[num]): Input data to be transformed using softmax function.
+
+    Returns:
+        list[num]: Transformed data after applying softmax function.
+    """
+
+    return softmax(x)
+
+
+def Sigmoid(
+    x # list[num]: Input data to be transformed using sigmoid function.
+):
+    """
+    Applies the sigmoid function to the input data.
+
+    Args:
+        x (list[num]): Input data to be transformed using sigmoid function.
+
+    Returns:
+        list[num]: Transformed data after applying sigmoid function.
+    """
+    return expit(x)
+
+
+def Relu(
+    x # list[num]: Input data to be transformed using ReLU function.
+):
+    """
+    Applies the Rectified Linear Unit (ReLU) function to the input data.
+
+    Args:
+        x (list[num]): Input data to be transformed using ReLU function.
+
+    Returns:
+        list[num]: Transformed data after applying ReLU function.
+    """
+
+
+    return np.maximum(0, x)
+
+
+
+
+def evaluate(
+    x_test, # list[list[num]]: Test input data.
+    y_test, # list[num]: Test labels.
+    visualize, # visualize Testing procces or not visualize ('y' or 'n')
+    W # list[list[num]]: Weight matrix of the neural network.
+) -> tuple:
+    infoTestModel = """
+    Tests the neural network model with the given test data.
+
+    Args:
+        x_test (list[list[num]]): Test input data.
+        y_test (list[num]): Test labels.
+        activation_potential (float): Input activation potential
+        visualize (str): Visualize test progress ? ('y' or 'n')
+        W (list[list[num]]): Weight matrix of the neural network.
+
+    Returns:
+        tuple: A tuple containing the predicted labels and the accuracy of the model.
+    """
+
+    layers = ['fex','cat']
+
+
+    try:
+        Wc = [0] * len(W)
+        true = 0
+        TestPredictions = [None] * len(y_test)
+        for i, w in enumerate(W):
+            Wc[i] = np.copy(w)
+            print('\rCopying weights.....',i+1,'/',len(W),end = "")
+
+        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        start_time = time.time()
+        for inpIndex,Input in enumerate(x_test):
+            Input = np.array(Input)
+            Input = Input.ravel()
+            uni_start_time = time.time()
+            neural_layer = Input
+
+            for index, Layer in enumerate(layers):
+
+                # neural_layer = normalization(neural_layer)
+
+                if layers[index] == 'fex':
+                    neural_layer = fex(neural_layer, W[index], 0, 0)[0]
+                if layers[index] == 'cat':
+                    neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+
+            for i, w in enumerate(Wc):
+                W[i] = np.copy(w)
+            RealOutput = np.argmax(y_test[inpIndex])
+            PredictedOutput = np.argmax(neural_layer)
+            if RealOutput == PredictedOutput:
+                true += 1
+            acc = true / len(y_test)
+            TestPredictions[inpIndex] = PredictedOutput
+
+            if visualize == 'y':
+
+                y_testVisual = np.copy(y_test)
+                y_testVisual = np.argmax(y_testVisual, axis=1)
+
+                plt.figure(figsize=(12, 6))
+                sns.kdeplot(y_testVisual, label='Real Outputs', fill=True)
+                sns.kdeplot(TestPredictions, label='Predictions', fill=True)
+                plt.legend()
+                plt.xlabel('Class')
+                plt.ylabel('Data size')
+                plt.title('Predictions and Real Outputs for Testing KDE Plot')
+                plt.show()
+
+                if inpIndex + 1 != len(x_test):
+
+                    plt.close('all')
+
+            uni_end_time = time.time()
+
+            calculating_est = round((uni_end_time - uni_start_time) * (len(x_test) - inpIndex),3)
+
+            if calculating_est < 60:
+                print('\rest......(sec):',calculating_est,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 60 and calculating_est < 3600:
+                print('\rest......(min):',calculating_est/60,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+            elif calculating_est > 3600:
+                print('\rest......(h):',calculating_est/3600,'\n',end= "")
+                print('\rTest accuracy: ' ,acc ,"\n", end="")
+
+        EndTime = time.time()
+        for i, w in enumerate(Wc):
+            W[i] = np.copy(w)
+
+        calculating_est = round(EndTime - start_time,2)
+
+        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
+
+        if calculating_est < 60:
+            print('Total testing time(sec): ',calculating_est)
+
+        elif calculating_est > 60 and calculating_est < 3600:
+            print('Total testing time(min): ',calculating_est/60)
+
+        elif calculating_est > 3600:
+            print('Total testing time(h): ',calculating_est/3600)
+
+        if acc >= 0.8:
+            print(Fore.GREEN + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc < 0.8 and acc > 0.6:
+            print(Fore.MAGENTA + '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+        elif acc <= 0.6:
+            print(Fore.RED+ '\nTotal Test accuracy: ' ,acc, '\n' + Style.RESET_ALL)
+
+
+
+    except:
+
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potential' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
+        return 'e'
+
+
+
+    return W,TestPredictions,acc
+
+def save_model(model_name,
+               model_type,
+               class_count,
+               test_acc,
+               weights_type,
+               weights_format,
+               model_path,
+               W
+):
+
+    infosave_model = """
+    Function to save a pruning learning model.
+
+    Arguments:
+    model_name (str): Name of the model.
+    model_type (str): Type of the model.(options: PLAN)
+    class_count (int): Number of classes.
+    activation_potential (float): Activation potential.
+    test_acc (float): Test accuracy of the model.
+    weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
+    WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
+    model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+    W: Weights of the model.
+
+    Returns:
+    str: Message indicating if the model was saved successfully or encountered an error.
+    """
+
+    # Operations to be performed by the function will be written here
+    pass
+
+    layers = ['fex','cat']
+
+    if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
+        print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" + infosave_model + Style.RESET_ALL)
+        return 'e'
+
+    if weights_format != 'd' and weights_format != 'f' and weights_format != 'raw':
+        print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: save_model" + infosave_model + Style.RESET_ALL)
+        return 'e'
+
+    NeuronCount = 0
+    SynapseCount = 0
+
+    try:
+        for w in W:
+            NeuronCount += np.shape(w)[0]
+            SynapseCount += np.shape(w)[0] * np.shape(w)[1]
+    except:
+
+        print(Fore.RED + "ERROR: Weight matrices has a problem from: save_model" + infosave_model + Style.RESET_ALL)
+        return 'e'
+    import pandas as pd
+    from datetime import datetime
+    from scipy import io
+
+    data = {'MODEL NAME': model_name,
+            'MODEL TYPE': model_type,
+            'LAYERS': layers,
+            'LAYER COUNT': len(layers),
+            'CLASS COUNT': class_count,
+            'NEURON COUNT': NeuronCount,
+            'SYNAPSE COUNT': SynapseCount,
+            'TEST ACCURACY': test_acc,
+            'SAVE DATE': datetime.now(),
+            'WEIGHTS TYPE': weights_type,
+            'WEIGHTS FORMAT': weights_format,
+            'MODEL PATH': model_path
+            }
+    try:
+
+        df = pd.DataFrame(data)
+
+
+        df.to_csv(model_path + model_name + '.txt', sep='\t', index=False)
+
+
+    except:
+
+        print(Fore.RED + "ERROR: Model log not saved probably model_path incorrect. Check the log parameters from: save_model" + infosave_model + Style.RESET_ALL)
+        return 'e'
+    try:
+
+        if weights_type == 'txt' and weights_format == 'd':
+
+            for i, w in enumerate(W):
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%d')
+
+        if weights_type == 'txt' and weights_format == 'f':
+
+            for i, w in enumerate(W):
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w, fmt='%f')
+
+        if weights_type == 'txt' and weights_format == 'raw':
+
+            for i, w in enumerate(W):
+                np.savetxt(model_path + model_name + str(i+1) + 'w.txt' , w)
+
+
+        ###
+
+
+        if weights_type == 'npy' and weights_format == 'd':
+
+            for i, w in enumerate(W):
+                np.save(model_path + model_name + str(i+1) + 'w.npy', w.astype(int))
+
+        if weights_type == 'npy' and weights_format == 'f':
+
+            for i, w in enumerate(W):
+                np.save(model_path + model_name + str(i+1) + 'w.npy' , w, w.astype(float))
+
+        if weights_type == 'npy' and weights_format == 'raw':
+
+            for i, w in enumerate(W):
+                np.save(model_path + model_name + str(i+1) + 'w.npy' , w)
+
+
+        ###
+
+
+        if weights_type == 'mat' and weights_format == 'd':
+
+            for i, w in enumerate(W):
+                w = {'w': w.astype(int)}
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+
+        if weights_type == 'mat' and weights_format == 'f':
+
+            for i, w in enumerate(W):
+                w = {'w': w.astype(float)}
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+
+        if weights_type == 'mat' and weights_format == 'raw':
+
+            for i, w in enumerate(W):
+                w = {'w': w}
+                io.savemat(model_path + model_name + str(i+1) + 'w.mat', w)
+
+    except:
+
+        print(Fore.RED + "ERROR: Model Weights not saved. Check the Weight parameters. SaveFilePath expl: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: save_model" + infosave_model + Style.RESET_ALL)
+        return 'e'
+    print(df)
+    message = (
+        Fore.GREEN + "Model Saved Successfully\n" +
+        Fore.MAGENTA + "Don't forget, if you want to load model: model log file and weight files must be in the same directory." +
+        Style.RESET_ALL
+    )
+
+    return print(message)
+
+
+def load_model(model_name,
+               model_path,
+):
+    infoload_model = """
+    Function to load a pruning learning model.
+
+    Arguments:
+    model_name (str): Name of the model.
+    model_path (str): Path where the model is saved.
+    log_type (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
+
+    Returns:
+    lists: W(list[num]), activation_potential, df (DataFrame of the model)
+    """
+    pass
+
+
+    import pandas as pd
+    import scipy.io as sio
+
+    try:
+
+        df = pd.read_csv(model_path + model_name + '.' + 'txt', delimiter='\t')
+
+    except:
+
+        print(Fore.RED + "ERROR: Model Path error. accaptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: load_model" + infoload_model + Style.RESET_ALL)
+
+    model_name = str(df['MODEL NAME'].iloc[0])
+    layers = df['LAYERS'].tolist()
+    layer_count = int(df['LAYER COUNT'].iloc[0])
+    class_count = int(df['CLASS COUNT'].iloc[0])
+    NeuronCount = int(df['NEURON COUNT'].iloc[0])
+    SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
+    test_acc = int(df['TEST ACCURACY'].iloc[0])
+    model_type = str(df['MODEL TYPE'].iloc[0])
+    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
+    WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
+    model_path = str(df['MODEL PATH'].iloc[0])
+
+    W = [0] * layer_count
+
+    if WeightType == 'txt':
+        for i in range(layer_count):
+            W[i] = np.loadtxt(model_path + model_name + str(i+1) + 'w.txt')
+    elif WeightType == 'npy':
+        for i in range(layer_count):
+            W[i] = np.load(model_path + model_name + str(i+1) + 'w.npy')
+    elif WeightType == 'mat':
+        for i in range(layer_count):
+            W[i] = sio.loadmat(model_path + model_name + str(i+1) + 'w.mat')
+    else:
+        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: load_model." + infoload_model + Style.RESET_ALL)
+    print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
+    return W,df
+
+def predict_model_ssd(Input,model_name,model_path):
+
+    infopredict_model_ssd = """
+    Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
+
+    Arguments:
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
+    model_name (str): Name of the model.
+    model_path (str): Path where the model is saved.
+    Returns:
+    ndarray: Output from the model.
+    """
+    W = load_model(model_name,model_path)[0]
+
+    layers = ['fex','cat']
+
+    Wc = [0] * len(W)
+    for i, w in enumerate(W):
+        Wc[i] = np.copy(w)
+    try:
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
+
+            # neural_layer = normalization(neural_layer)
+
+            if layers[index] == 'fex':
+                neural_layer = fex(neural_layer, W[index],0, 0)[0]
+            if layers[index] == 'cat':
+                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+    except:
+        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: predict_model_ssd" + infopredict_model_ssd + Style.RESET_ALL)
+        return 'e'
+    for i, w in enumerate(Wc):
+        W[i] = np.copy(w)
+    return neural_layer
+
+
+def predict_model_ram(Input,W):
+
+    infopredict_model_ram = """
+    Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
+    from weights and parameters stored in memory.
+
+    Arguments:
+    Input (list or ndarray): Input data for the model (single vector or single matrix).
+    activation_potential (float): Activation potential.
+    W (list of ndarrays): Weights of the model.
+
+    Returns:
+    ndarray: Output from the model.
+    """
+
+    layers = ['fex','cat']
+
+    Wc = [0] * len(W)
+    for i, w in enumerate(W):
+        Wc[i] = np.copy(w)
+    try:
+        neural_layer = Input
+        neural_layer = np.array(neural_layer)
+        neural_layer = neural_layer.ravel()
+        for index, Layer in enumerate(layers):
+
+            # neural_layer = normalization(neural_layer)
+
+            if layers[index] == 'fex':
+                neural_layer = fex(neural_layer, W[index],0, 0)[0]
+            if layers[index] == 'cat':
+                neural_layer = cat(neural_layer, W[index], 0, 0)[0]
+
+    except:
+        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: predict_model_ram." + infopredict_model_ram + Style.RESET_ALL)
+        return 'e'
+    for i, w in enumerate(Wc):
+        W[i] = np.copy(w)
+    return neural_layer
+
+
+def auto_balancer(x_train, y_train, class_count):
+
+    infoauto_balancer = """
+    Function to balance the training data across different classes.
+
+    Arguments:
+    x_train (list): Input data for training.
+    y_train (list): Labels corresponding to the input data.
+    class_count (int): Number of classes.
+
+    Returns:
+    tuple: A tuple containing balanced input data and labels.
+    """
+    try:
+        ClassIndices = {i: np.where(np.array(y_train)[:, i] == 1)[0] for i in range(class_count)}
+        classes = [len(ClassIndices[i]) for i in range(class_count)]
+
+        if len(set(classes)) == 1:
+            print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+            return x_train, y_train
+
+        MinCount = min(classes)
+
+        BalancedIndices = []
+        for i in range(class_count):
+            if len(ClassIndices[i]) > MinCount:
+                SelectedIndices = np.random.choice(ClassIndices[i], MinCount, replace=False)
+            else:
+                SelectedIndices = ClassIndices[i]
+            BalancedIndices.extend(SelectedIndices)
+
+        BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+        BalancedLabels = [y_train[idx] for idx in BalancedIndices]
+
+        print(Fore.GREEN + "All Training Data Succesfully Balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+    except:
+        print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
+        return 'e'
+
+    return BalancedInputs, BalancedLabels
+
+def synthetic_augmentation(x, y, class_count):
+    """
+    Generates synthetic examples to balance classes with fewer examples.
+
+    Arguments:
+    x -- Input dataset (examples) - list format
+    y -- Class labels (one-hot encoded) - list format
+    class_count -- Number of classes
+
+    Returns:
+    x_balanced -- Balanced input dataset (list format)
+    y_balanced -- Balanced class labels (one-hot encoded, list format)
+    """
+    # Calculate class distribution
+    class_distribution = {i: 0 for i in range(class_count)}
+    for label in y:
+        class_distribution[np.argmax(label)] += 1
+
+    max_class_count = max(class_distribution.values())
+
+    x_balanced = list(x)
+    y_balanced = list(y)
+
+    for class_label in range(class_count):
+        class_indices = [i for i, label in enumerate(y) if np.argmax(label) == class_label]
+        num_samples = len(class_indices)
+
+        if num_samples < max_class_count:
+            while num_samples < max_class_count:
+
+                random_indices = np.random.choice(class_indices, 2, replace=False)
+                sample1 = x[random_indices[0]]
+                sample2 = x[random_indices[1]]
+
+                synthetic_sample = sample1 + (np.array(sample2) - np.array(sample1)) * np.random.rand()
+
+                x_balanced.append(synthetic_sample.tolist())
+                y_balanced.append(y[class_indices[0]])
+
+                num_samples += 1
+
+    return np.array(x_balanced), np.array(y_balanced)
+
+def standard_scaler(x_train, x_test):
+    info_standard_scaler = """
+    Standardizes training and test datasets.
+
+    Args:
+        train_data: numpy.ndarray
+            Training data (n_samples, n_features)
+        test_data: numpy.ndarray
+            Test data (n_samples, n_features)
+
+    Returns:
+        tuple
+            Standardized training and test datasets
+    """
+    try:
+        mean = np.mean(x_train, axis=0)
+        std = np.std(x_train, axis=0)
+
+
+        train_data_scaled = (x_train - mean) / std
+        test_data_scaled = (x_test - mean) / std
+
+    except:
+        print(Fore.RED + "ERROR: x_train and x_test must be numpy array from standard_scaler" + info_standard_scaler)
+
+    return train_data_scaled, test_data_scaled
+
+
+def get_weights():
+
+    return 0
+
+def get_df():
+
+    return 1
+
+def get_preds():
+
+    return 1
+
+def get_acc():
+
+    return 2
+
+def get_pot():
+
+    return 1
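Since plan_di/plan_di.py is entirely new in this release, a short end-to-end sketch of its public surface may help; the toy data below is invented (two one-hot classes, non-negative features, as the release note requires for plan_di), and the 2.0.7 package is assumed installed:

import numpy as np
from plan_di import fit, evaluate, predict_model_ram

np.random.seed(0)
x = [np.random.rand(4) for _ in range(20)]          # 20 samples, 4 features
y = [[1, 0] if s[0] > 0.5 else [0, 1] for s in x]   # one-hot labels

trained_W, train_preds, train_acc = fit(x, y)
W, test_preds, test_acc = evaluate(x, y, 'n', trained_W)  # 'n' skips the KDE plot
print(predict_model_ram(x[0], W))                   # raw output vector

save_model and load_model round-trip the same weights through 'txt', 'npy' or 'mat' files under a caller-supplied model_path, and predict_model_ssd runs the same forward pass from a saved model instead of in-memory weights.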
pyerualjetwork-2.0.7/pyerualjetwork.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,7 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.0.7
+Summary: Advanced python deep learning library. New features: BINARY INJECTION (OLD) NOW ADDED NEW DIRECT FEATURE INJECTION. AND 'standard_scaler' func. Important Note: If there are any data smaller than 0 among the input data of the entry model, import plan_bi; otherwise, import plan_di. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,pruning learning artficial neural networks
{pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/setup.py
@@ -5,10 +5,10 @@ from setuptools import setup, find_packages
 setup(
 
     name = "pyerualjetwork",
-    version = "2.0.
+    version = "2.0.7",
    author = "Hasan Can Beydili",
    author_email = "tchasancan@gmail.com",
-    description= "Advanced python deep learning library. New features:
+    description= "Advanced python deep learning library. New features: BINARY INJECTION (OLD) NOW ADDED NEW DIRECT FEATURE INJECTION. AND 'standard_scaler' func. Important Note: If there are any data smaller than 0 among the input data of the entry model, import plan_bi; otherwise, import plan_di. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)",
    packages = find_packages(),
    keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks'],
 
pyerualjetwork-2.0.5/PKG-INFO
DELETED
@@ -1,7 +0,0 @@
-Metadata-Version: 2.1
-Name: pyerualjetwork
-Version: 2.0.5
-Summary: Advanced python deep learning library. New features: More simple and practical, action_potential changed to activation_potential, all functions and variables are snake_case. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
-Author: Hasan Can Beydili
-Author-email: tchasancan@gmail.com
-Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.0.5/plan/__init__.py
DELETED
@@ -1,5 +0,0 @@
-# pyerualjetwork/PLAN/__init__.py
-
-# This file is the main entry point of the plan module.
-
-from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, synaptic_pruning, synaptic_dividing, weight_identification, fex, cat, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation
pyerualjetwork-2.0.5/pyerualjetwork.egg-info/PKG-INFO
DELETED
@@ -1,7 +0,0 @@
-Metadata-Version: 2.1
-Name: pyerualjetwork
-Version: 2.0.5
-Summary: Advanced python deep learning library. New features: More simple and practical, action_potential changed to activation_potential, all functions and variables are snake_case. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
-Author: Hasan Can Beydili
-Author-email: tchasancan@gmail.com
-Keywords: model evaluation,classifcation,pruning learning artficial neural networks
pyerualjetwork-2.0.5/pyerualjetwork.egg-info/top_level.txt
DELETED
@@ -1 +0,0 @@
-plan
{pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/pyerualjetwork.egg-info/dependency_links.txt
File without changes

{pyerualjetwork-2.0.5 → pyerualjetwork-2.0.7}/setup.cfg
File without changes