pyerualjetwork 1.2.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork-1.2.7/PKG-INFO +7 -0
- pyerualjetwork-1.2.7/plan/__init__.py +5 -0
- pyerualjetwork-1.2.7/plan/plan.py +1048 -0
- pyerualjetwork-1.2.7/pyerualjetwork.egg-info/PKG-INFO +7 -0
- pyerualjetwork-1.2.7/pyerualjetwork.egg-info/SOURCES.txt +7 -0
- pyerualjetwork-1.2.7/pyerualjetwork.egg-info/dependency_links.txt +1 -0
- pyerualjetwork-1.2.7/pyerualjetwork.egg-info/top_level.txt +1 -0
- pyerualjetwork-1.2.7/setup.cfg +4 -0
- pyerualjetwork-1.2.7/setup.py +15 -0
@@ -0,0 +1,7 @@
Metadata-Version: 2.1
Name: pyerualjetwork
Version: 1.2.7
Summary: Advanced python deep learning library. 'ThresholdSigns' changed to 'MembranThresholds' and 'ThresholdValues' changed to 'MembranPotentials'. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
Author: Hasan Can Beydili
Author-email: tchasancan@gmail.com
Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -0,0 +1,5 @@
# pyerualjetwork/PLAN/__init__.py

# This file is the main entry point of the plan module.

from .plan import AutoBalancer, Normalization, Softmax, Sigmoid, Relu, SynapticPruning, SynapticDividing, WeightIdentification, Fex, Cat, TrainPLAN, TestPLAN, SavePLAN, LoadPLAN, PredictFromDiscPLAN, PredictFromRamPLAN, GetWeights, GetDf, GetPreds, GetAcc
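A minimal import sketch (illustrative, not part of the release): since top_level.txt below registers `plan` as the installed top-level module, the public API re-exported here can be imported as:

    from plan import TrainPLAN, TestPLAN, SavePLAN, LoadPLAN, AutoBalancer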
@@ -0,0 +1,1048 @@
import numpy as np
import time
from colorama import Fore, Style
from typing import List, Union
import math
from scipy.special import expit, softmax

# BUILD -----

def TrainPLAN(
    TrainInputs: List[Union[int, float]],
    TrainLabels: List[Union[int, float, str]],  # At least two classes, one-hot encoded
    ClassCount: int,
    Layers: List[str],
    Neurons: List[Union[int, float]],
    MembranThresholds: List[str],
    MembranPotentials: List[Union[int, float]],
    Normalizations: List[str],
    Activations: List[str]
) -> tuple:

    infoPLAN = """
    Creates and configures a PLAN model.

    Args:
        TrainInputs (list[num]): List of input data.
        TrainLabels (list[num]): List of TrainLabels (one-hot encoded).
        ClassCount (int): Number of classes.
        Layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
        Neurons (list[num]): List of neuron counts for each layer.
        MembranThresholds (list[str]): List of MembranThresholds.
        MembranPotentials (list[num]): List of MembranPotentials.
        Normalizations (list[str]): Whether normalization is performed at the indexed layers ("y" or "n").
        Activations (list[str]): List of activation functions.

    Returns:
        list([num]): (Weight matrices list, TrainPredictions list, TrainAcc).
        error handled?: Process status ('e')
    """

    LastNeuron = Neurons[-1]
    if LastNeuron != ClassCount:
        print(Fore.RED + "ERROR108: The neuron count of the last layer must equal the class count. from: TrainPLAN", infoPLAN)
        return 'e'

    if len(Normalizations) != len(MembranPotentials):
        print(Fore.RED + "ERROR307: The Normalizations list must have the same length as the MembranThresholds, MembranPotentials, Layers and Neurons lists. from: TrainPLAN", infoPLAN)
        return 'e'

    if len(TrainInputs) != len(TrainLabels):
        print(Fore.RED + "ERROR301: The TrainInputs and TrainLabels lists must have the same length. from: TrainPLAN", infoPLAN)
        return 'e'

    for i, Value in enumerate(MembranPotentials):

        if Normalizations[i] != 'y' and Normalizations[i] != 'n':
            print(Fore.RED + "ERROR105: Normalizations entries must be 'y' or 'n'. from: TrainPLAN", infoPLAN)
            return 'e'

        if MembranThresholds[i] == 'none':
            print(Fore.MAGENTA + "WARNING102: We advise against the 'none' threshold sign, although in some cases it improves model performance. from: TrainPLAN", infoPLAN + Style.RESET_ALL)
            time.sleep(3)

        if isinstance(Value, str):
            print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
            return 'e'

        if isinstance(Neurons[i], str):
            print(Fore.RED + "ERROR202: The Neurons list must be numeric. from: TrainPLAN")
            return 'e'

    if len(MembranThresholds) != len(MembranPotentials):
        print(Fore.RED + "ERROR302: The MEMBRAN THRESHOLDS and MEMBRAN POTENTIALS lists must have the same length. from: TrainPLAN", infoPLAN)
        return 'e'

    if len(Layers) != len(Neurons):
        print(Fore.RED + "ERROR303: The Layers and Neurons lists must have the same length. from: TrainPLAN", infoPLAN)
        return 'e'

    if len(MembranPotentials) != len(Layers) or len(MembranThresholds) != len(Layers):
        print(Fore.RED + "ERROR306: The MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists must have the same length as the Layers list. from: TrainPLAN", infoPLAN)
        return 'e'

    for Activation in Activations:
        if Activation != 'softmax' and Activation != 'sigmoid' and Activation != 'relu' and Activation != 'none':
            print(Fore.RED + "ERROR108: Activations entries must be 'sigmoid', 'softmax', 'relu' or 'none'. from: TrainPLAN", infoPLAN)
            return 'e'

    for index, Neuron in enumerate(Neurons):
        if Neuron < 1:
            print(Fore.RED + "ERROR101: Neurons entries must be positive non-zero integers. from: TrainPLAN", infoPLAN)
            return 'e'

        if index + 1 != len(Neurons) and Neuron % 2 != 0:
            print(Fore.MAGENTA + "WARNING101: We strongly advise using even neuron counts. from: TrainPLAN", infoPLAN)
            time.sleep(3)

        if Neuron < ClassCount:
            print(Fore.RED + "ERROR102: Neuron count must be greater than the class count (for PLAN). from: TrainPLAN")
            return 'e'

        if Layers[index] != 'fex' and Layers[index] != 'cat':
            print(Fore.RED + "ERROR107: Layers entries must be 'fex' (Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN", infoPLAN)
            return 'e'

    if len(MembranThresholds) != len(MembranPotentials):
        print(Fore.RED + "ERROR305: The MEMBRAN THRESHOLDS and MEMBRAN POTENTIALS lists must have the same length. from: TrainPLAN", infoPLAN)
        return 'e'

    for i, Sign in enumerate(MembranThresholds):
        if Sign != '>' and Sign != '<' and Sign != '==' and Sign != '!=' and Sign != 'none':
            print(Fore.RED + "ERROR104: MEMBRAN THRESHOLDS must be '>', '<', '==', '!=' or 'none'. WE SUGGEST '<' FOR FEX LAYERS AND '==' FOR CAT LAYERS (your data, your hyperparameter). from: TrainPLAN", infoPLAN)
            return 'e'

        if Layers[i] == 'fex' and Sign == 'none':
            print(Fore.RED + "ERROR109: Pairing layer type 'fex' with a 'none' threshold is not acceptable. If you want 'none', use '==' with a threshold value of '0'. from: TrainPLAN", infoPLAN)
            return 'e'

    UniqueTrainLabels = set()
    for sublist in TrainLabels:
        UniqueTrainLabels.add(tuple(sublist))

    UniqueTrainLabels = list(UniqueTrainLabels)

    TrainLabels = [tuple(sublist) for sublist in TrainLabels]

    if len(UniqueTrainLabels) != ClassCount:
        print(Fore.RED + "ERROR106: The number of distinct labels must equal the class count. from: TrainPLAN", infoPLAN)
        return 'e'

    TrainInputs[0] = np.array(TrainInputs[0])
    TrainInputs[0] = TrainInputs[0].ravel()
    TrainInputsize = len(TrainInputs[0])

    W = WeightIdentification(len(Layers) - 1, ClassCount, Neurons, TrainInputsize)
    Divides = SynapticDividing(ClassCount, W)
    TrainedWs = [1] * len(W)
    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
    TrainPredictions = [1] * len(TrainLabels)
    true = 0
    StartTime = time.time()
    for index, inp in enumerate(TrainInputs):
        UniStartTime = time.time()
        inp = np.array(inp)
        inp = inp.ravel()

        if TrainInputsize != len(inp):
            print(Fore.RED + "ERROR304: All input matrices or vectors in the TrainInputs list must be the same size. from: TrainPLAN", infoPLAN + Style.RESET_ALL)
            return 'e'

        for Ulindex, Ul in enumerate(UniqueTrainLabels):

            if Ul == TrainLabels[index]:
                for Windex, w in enumerate(W):
                    for i, ul in enumerate(Ul):
                        if ul == 1.0:
                            k = i
                    Cs = Divides[int(k)][Windex][0]

                    W[Windex] = SynapticPruning(w, Cs, 'row', int(k), ClassCount)

        NeuralLayer = inp

        for Lindex, Layer in enumerate(Layers):

            if Normalizations[Lindex] == 'y':
                NeuralLayer = Normalization(NeuralLayer)

            if Activations[Lindex] == 'relu':
                NeuralLayer = Relu(NeuralLayer)
            elif Activations[Lindex] == 'sigmoid':
                NeuralLayer = Sigmoid(NeuralLayer)
            elif Activations[Lindex] == 'softmax':
                NeuralLayer = Softmax(NeuralLayer)

            if Layer == 'fex':
                NeuralLayer, W[Lindex] = Fex(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex])
            elif Layer == 'cat':
                NeuralLayer, W[Lindex] = Cat(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex], 1)

        RealOutput = np.argmax(TrainLabels[index])
        PredictedOutput = np.argmax(NeuralLayer)
        if RealOutput == PredictedOutput:
            true += 1
        Acc = true / len(TrainLabels)
        TrainPredictions[index] = PredictedOutput

        if index == 0:
            for i, w in enumerate(W):
                TrainedWs[i] = w

        else:
            for i, w in enumerate(W):
                TrainedWs[i] = TrainedWs[i] + w

        W = WeightIdentification(len(Layers) - 1, ClassCount, Neurons, TrainInputsize)

        UniEndTime = time.time()

        CalculatingEst = round((UniEndTime - UniStartTime) * (len(TrainInputs) - index), 3)

        if CalculatingEst < 60:
            print('\rest......(sec):', CalculatingEst, '\n', end="")
            print('\rTrain Accuracy: ', Acc, "\n", end="")

        elif CalculatingEst < 3600:
            print('\rest......(min):', CalculatingEst / 60, '\n', end="")
            print('\rTrain Accuracy: ', Acc, "\n", end="")

        else:
            print('\rest......(h):', CalculatingEst / 3600, '\n', end="")
            print('\rTrain Accuracy: ', Acc, "\n", end="")

    EndTime = time.time()

    CalculatingEst = round(EndTime - StartTime, 2)

    print(Fore.GREEN + " \nTrain Finished with 0 ERROR\n")

    if CalculatingEst < 60:
        print('Total training time(sec): ', CalculatingEst)

    elif CalculatingEst < 3600:
        print('Total training time(min): ', CalculatingEst / 60)

    else:
        print('Total training time(h): ', CalculatingEst / 3600)

    if Acc >= 0.8:
        print(Fore.GREEN + '\nTotal Train Accuracy: ', Acc, '\n', Style.RESET_ALL)

    elif Acc > 0.6:
        print(Fore.MAGENTA + '\nTotal Train Accuracy: ', Acc, '\n', Style.RESET_ALL)

    else:
        print(Fore.RED + '\nTotal Train Accuracy: ', Acc, '\n', Style.RESET_ALL)

    return TrainedWs, TrainPredictions, Acc
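# Editorial usage sketch (hypothetical data and hyperparameters, not part of the
# release): training a two-layer PLAN on 3 classes. `x_train` must hold
# equal-sized input vectors and `y_train` one-hot encoded labels.
#
#     Out = TrainPLAN(x_train, y_train, ClassCount=3,
#                     Layers=['fex', 'cat'], Neurons=[60, 3],
#                     MembranThresholds=['<', '=='], MembranPotentials=[0.7, 0],
#                     Normalizations=['y', 'n'], Activations=['none', 'none'])
#     TrainedWs, TrainPreds, TrainAcc = Out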

# FUNCTIONS -----

def WeightIdentification(
    LayerCount,      # int: Number of layers in the neural network.
    ClassCount,      # int: Number of classes in the classification task.
    Neurons,         # list[num]: List of neuron counts for each layer.
    TrainInputsize   # int: Size of the input data.
) -> list:
    """
    Identifies the weights for a neural network model.

    Args:
        LayerCount (int): Number of layers in the neural network.
        ClassCount (int): Number of classes in the classification task.
        Neurons (list[num]): List of neuron counts for each layer.
        TrainInputsize (int): Size of the input data.

    Returns:
        list([numpy_arrays],[...]): Weight matrices of the model.
    """

    Wlen = LayerCount + 1
    W = [None] * Wlen
    W[0] = np.ones((Neurons[0], TrainInputsize))
    ws = LayerCount - 1
    for w in range(ws):
        W[w + 1] = np.ones((Neurons[w + 1], Neurons[w]))
    W[LayerCount] = np.ones((ClassCount, Neurons[LayerCount - 1]))
    return W
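# Editorial shape sketch (hypothetical values): for ClassCount=3, Neurons=[60, 3]
# and a 784-dimensional input, WeightIdentification(1, 3, [60, 3], 784) returns
# two all-ones matrices:
#
#     W[0].shape == (60, 784)  # 'fex' layer: neuron count x input size
#     W[1].shape == (3, 60)    # last layer: class count x previous neuron count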

def SynapticPruning(
    w,          # list[list[num]]: Weight matrix of the neural network.
    Cs,         # list[list[num]]: Synaptic connections between neurons.
    Key,        # str: Key identifying synaptic row or column connections.
    Class,      # int: Class label for the current training instance.
    ClassCount  # int: Total number of classes in the dataset.
) -> np.ndarray:
    infoPruning = """
    Performs synaptic pruning in a neural network model.

    Args:
        w (list[list[num]]): Weight matrix of the neural network.
        Cs (list[list[num]]): Synaptic connections between neurons.
        Key (str): Key identifying synaptic row or column connections.
        Class (int): Class label for the current training instance.
        ClassCount (int): Total number of classes in the dataset.

    Returns:
        numpy array: Weight matrix.
    """

    Class += 1  # because indices start at 0

    if Class != ClassCount and Class != 1:

        Ce = Cs / Class

        w[int(Ce) - 1::-1, :] = 0

        w[Cs:, :] = 0

    else:

        if Class == 1:
            if Key == 'row':

                w[Cs:, :] = 0

            elif Key == 'col':

                w[:, Cs] = 0

            else:
                print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col'. from: SynapticPruning" + infoPruning)
                return 'e'
        else:
            if Key == 'row':

                w[Cs:, :] = 0

                Ce = int(round(w.shape[0] - Cs / ClassCount))
                w[Ce - 1::-1, :] = 0

            elif Key == 'col':

                w[:, Cs] = 0

            else:
                print(Fore.RED + "ERROR103: SynapticPruning func's Key parameter must be 'row' or 'col'. from: SynapticPruning" + infoPruning + Style.RESET_ALL)
                return 'e'
    return w

def SynapticDividing(
    ClassCount,  # int: Total number of classes in the dataset.
    W            # list[list[num]]: Weight matrix of the neural network.
) -> list:
    """
    Divides the synaptic weights of a neural network model based on the class count.

    Args:
        ClassCount (int): Total number of classes in the dataset.
        W (list[list[num]]): Weight matrix of the neural network.

    Returns:
        list: A 3D list holding the information of the divided net.
    """

    Piece = [1] * len(W)

    # Create an empty three-dimensional list
    Divides = [[[0] for _ in range(len(W))] for _ in range(ClassCount)]

    for i in range(len(W)):

        Piece[i] = int(math.floor(W[i].shape[0] / ClassCount))

    Cs = 0
    # j = Classes, i = Weights, [0] = CutStart.

    for i in range(len(W)):
        for j in range(ClassCount):
            Cs = Cs + Piece[i]
            Divides[j][i][0] = Cs

    return Divides
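# Editorial cut-point sketch (hypothetical values): with ClassCount=3 and the
# (60, 784) weight matrix from the example above, Piece[0] = floor(60 / 3) = 20,
# so the row cut starts for W[0] come out as Divides[0][0][0] == 20,
# Divides[1][0][0] == 40 and Divides[2][0][0] == 60: each class owns one
# 20-row block of W[0].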

def Fex(
    Input,             # list[num]: Input data.
    w,                 # list[list[num]]: Weight matrix of the neural network.
    MembranThreshold,  # str: Sign for threshold comparison ('<', '>', '==', '!=').
    MembranPotential   # num: Threshold value for comparison.
) -> tuple:
    """
    Applies the feature extraction process to the input data using synaptic pruning.

    Args:
        Input (list[num]): Input data.
        w (list[list[num]]): Weight matrix of the neural network.
        MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
        MembranPotential (num): Threshold value for comparison.

    Returns:
        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
    """

    if MembranThreshold == '<':
        PruneIndex = np.where(Input < MembranPotential)
    elif MembranThreshold == '>':
        PruneIndex = np.where(Input > MembranPotential)
    elif MembranThreshold == '==':
        PruneIndex = np.where(Input == MembranPotential)
    elif MembranThreshold == '!=':
        PruneIndex = np.where(Input != MembranPotential)

    w = SynapticPruning(w, PruneIndex, 'col', 0, 0)

    NeuralLayer = np.dot(w, Input)
    return NeuralLayer, w

def Cat(
    Input,             # list[num]: Input data.
    w,                 # list[list[num]]: Weight matrix of the neural network.
    MembranThreshold,  # str: Sign for threshold comparison ('<', '>', '==', '!=').
    MembranPotential,  # num: Threshold value for comparison.
    isTrain            # int: Flag indicating whether the function is called during training (1 for training, 0 otherwise).
) -> tuple:
    """
    Applies the categorization process to the input data, using synaptic pruning if specified.

    Args:
        Input (list[num]): Input data.
        w (list[list[num]]): Weight matrix of the neural network.
        MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
        MembranPotential (num): Threshold value for comparison.
        isTrain (int): Flag indicating whether the function is called during training (1 for training, 0 otherwise).

    Returns:
        tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
    """

    if MembranThreshold == '<':
        PruneIndex = np.where(Input < MembranPotential)
    elif MembranThreshold == '>':
        PruneIndex = np.where(Input > MembranPotential)
    elif MembranThreshold == '==':
        PruneIndex = np.where(Input == MembranPotential)
    elif MembranThreshold == '!=':
        PruneIndex = np.where(Input != MembranPotential)
    if isTrain == 1 and MembranThreshold != 'none':

        w = SynapticPruning(w, PruneIndex, 'col', 0, 0)

    NeuralLayer = np.dot(w, Input)
    return NeuralLayer, w
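# Editorial threshold sketch (hypothetical values): Fex and Cat zero the weight
# columns selected by np.where before the dot product. With '<' and 0.5:
#
#     Input = np.array([0.2, 0.9, 0.1])  # np.where(Input < 0.5) -> columns 0 and 2
#     w = np.ones((2, 3))
#     out, w = Fex(Input, w, '<', 0.5)   # columns 0 and 2 of w zeroed; out == [0.9, 0.9]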

def Normalization(
    Input  # list[num]: Input data to be normalized.
):
    """
    Normalizes the input data using maximum absolute scaling.

    Args:
        Input (list[num]): Input data to be normalized.

    Returns:
        list[num]: Scaled input data after normalization.
    """

    AbsVector = np.abs(Input)

    MaxAbs = np.max(AbsVector)

    ScaledInput = Input / MaxAbs

    return ScaledInput
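# Editorial example: max-abs scaling maps values into [-1, 1] by dividing by the
# largest magnitude, e.g. Normalization(np.array([2.0, -4.0, 1.0])) returns
# array([ 0.5 , -1.  ,  0.25]).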

def Softmax(
    x  # list[num]: Input data to be transformed using the softmax function.
):
    """
    Applies the softmax function to the input data.

    Args:
        x (list[num]): Input data to be transformed using the softmax function.

    Returns:
        list[num]: Transformed data after applying the softmax function.
    """

    return softmax(x)

def Sigmoid(
    x  # list[num]: Input data to be transformed using the sigmoid function.
):
    """
    Applies the sigmoid function to the input data.

    Args:
        x (list[num]): Input data to be transformed using the sigmoid function.

    Returns:
        list[num]: Transformed data after applying the sigmoid function.
    """
    return expit(x)

def Relu(
    x  # list[num]: Input data to be transformed using the ReLU function.
):
    """
    Applies the Rectified Linear Unit (ReLU) function to the input data.

    Args:
        x (list[num]): Input data to be transformed using the ReLU function.

    Returns:
        list[num]: Transformed data after applying the ReLU function.
    """

    return np.maximum(0, x)

def TestPLAN(
    TestInputs,         # list[list[num]]: Test input data.
    TestLabels,         # list[num]: Test labels.
    Layers,             # list[str]: List of layer names.
    MembranThresholds,  # list[str]: List of MEMBRAN THRESHOLDS for each layer.
    MembranPotentials,  # list[num]: List of MEMBRAN POTENTIALS for each layer.
    Normalizations,     # list[str]: Whether normalization will be performed ("y" or "n").
    Activations,        # list[str]: Activation function list for the neural network.
    W                   # list[list[num]]: Weight matrix of the neural network.
) -> tuple:
    infoTestModel = """
    Tests the neural network model with the given test data.

    Args:
        TestInputs (list[list[num]]): Test input data.
        TestLabels (list[num]): Test labels.
        Layers (list[str]): List of layer names.
        MembranThresholds (list[str]): List of MEMBRAN THRESHOLDS for each layer.
        MembranPotentials (list[num]): List of MEMBRAN POTENTIALS for each layer.
        Normalizations (list[str]): Whether normalization will be performed ("y" or "n").
        Activations (list[str]): Activation function list for the neural network.
        W (list[list[num]]): Weight matrix of the neural network.

    Returns:
        tuple: A tuple containing the predicted labels and the accuracy of the model.
    """

    try:
        Wc = [0] * len(W)
        true = 0
        TestPredictions = [1] * len(TestLabels)
        for i, w in enumerate(W):
            Wc[i] = np.copy(w)
            print('\rCopying weights.....', i + 1, '/', len(W), end="")

        print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
        StartTime = time.time()
        for inpIndex, Input in enumerate(TestInputs):
            Input = np.array(Input)
            Input = Input.ravel()
            UniStartTime = time.time()
            NeuralLayer = Input

            for index, Layer in enumerate(Layers):
                if Normalizations[index] == 'y':
                    NeuralLayer = Normalization(NeuralLayer)
                if Activations[index] == 'relu':
                    NeuralLayer = Relu(NeuralLayer)
                elif Activations[index] == 'sigmoid':
                    NeuralLayer = Sigmoid(NeuralLayer)
                elif Activations[index] == 'softmax':
                    NeuralLayer = Softmax(NeuralLayer)

                if Layers[index] == 'fex':
                    NeuralLayer, useless = Fex(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index])
                if Layers[index] == 'cat':
                    NeuralLayer, useless = Cat(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index], 0)
            for i, w in enumerate(Wc):
                W[i] = np.copy(w)
            RealOutput = np.argmax(TestLabels[inpIndex])
            PredictedOutput = np.argmax(NeuralLayer)
            if RealOutput == PredictedOutput:
                true += 1
            Acc = true / len(TestLabels)
            TestPredictions[inpIndex] = PredictedOutput
            UniEndTime = time.time()

            CalculatingEst = round((UniEndTime - UniStartTime) * (len(TestInputs) - inpIndex), 3)

            if CalculatingEst < 60:
                print('\rest......(sec):', CalculatingEst, '\n', end="")
                print('\rTest Accuracy: ', Acc, "\n", end="")

            elif CalculatingEst < 3600:
                print('\rest......(min):', CalculatingEst / 60, '\n', end="")
                print('\rTest Accuracy: ', Acc, "\n", end="")

            else:
                print('\rest......(h):', CalculatingEst / 3600, '\n', end="")
                print('\rTest Accuracy: ', Acc, "\n", end="")

        EndTime = time.time()
        for i, w in enumerate(Wc):
            W[i] = np.copy(w)

        CalculatingEst = round(EndTime - StartTime, 2)

        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")

        if CalculatingEst < 60:
            print('Total testing time(sec): ', CalculatingEst)

        elif CalculatingEst < 3600:
            print('Total testing time(min): ', CalculatingEst / 60)

        else:
            print('Total testing time(h): ', CalculatingEst / 3600)

        if Acc >= 0.8:
            print(Fore.GREEN + '\nTotal Test Accuracy: ', Acc, '\n' + Style.RESET_ALL)

        elif Acc > 0.6:
            print(Fore.MAGENTA + '\nTotal Test Accuracy: ', Acc, '\n' + Style.RESET_ALL)

        else:
            print(Fore.RED + '\nTotal Test Accuracy: ', Acc, '\n' + Style.RESET_ALL)

    except:

        print(Fore.RED + "ERROR: The testing model parameters (e.g. 'Layers', 'MembranThresholds') must match the trained model. Check the parameters. Are you sure the weights are loaded? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
        return 'e'

    return W, TestPredictions, Acc
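# Editorial usage sketch (hypothetical names): evaluating the weights returned by
# TrainPLAN on held-out data, with the same hyperparameters used for training:
#
#     W, TestPreds, TestAcc = TestPLAN(x_test, y_test, ['fex', 'cat'],
#                                      ['<', '=='], [0.7, 0], ['y', 'n'],
#                                      ['none', 'none'], TrainedWs)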

def SavePLAN(ModelName,
             ModelType,
             Layers,
             ClassCount,
             MembranThresholds,
             MembranPotentials,
             Normalizations,
             Activations,
             TestAcc,
             LogType,
             WeightsType,
             WeightsFormat,
             ModelPath,
             W
             ):

    infoSavePLAN = """
    Function to save a deep learning model.

    Arguments:
        ModelName (str): Name of the model.
        ModelType (str): Type of the model. (options: PLAN)
        Layers (list): List containing 'fex' and 'cat' layers.
        ClassCount (int): Number of classes.
        MembranThresholds (list): List containing MEMBRAN THRESHOLDS.
        MembranPotentials (list): List containing MEMBRAN POTENTIALS.
        Normalizations (list): Whether the data is normalized per layer ('y' or 'n').
        Activations (list): List containing activation functions for each layer.
        TestAcc (float): Test accuracy of the model.
        LogType (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
        WeightsType (str): Type of weights to save (options: 'txt', 'npy', 'mat').
        WeightsFormat (str): Format of the weights (options: 'd', 'f', 'raw').
        ModelPath (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
        W: Weights of the model.

    Returns:
        str: Message indicating whether the model was saved successfully or encountered an error.
    """

    if LogType != 'csv' and LogType != 'txt' and LogType != 'hdf5':
        print(Fore.RED + "ERROR109: Save Log Type (file extension) must be 'csv', 'txt' or 'hdf5'. from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'

    if WeightsType != 'txt' and WeightsType != 'npy' and WeightsType != 'mat':
        print(Fore.RED + "ERROR110: Save Weights Type (file extension) must be 'txt', 'npy' or 'mat'. from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'

    if WeightsFormat != 'd' and WeightsFormat != 'f' and WeightsFormat != 'raw':
        print(Fore.RED + "ERROR111: Weights Format must be 'd', 'f' or 'raw'. from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'

    NeuronCount = 0
    SynapseCount = 0
    try:
        for w in W:
            NeuronCount += np.shape(w)[0]
            SynapseCount += np.shape(w)[0] * np.shape(w)[1]
    except:

        print(Fore.RED + "ERROR: The weight matrices have a problem. from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'
    import pandas as pd
    from datetime import datetime
    from scipy import io

    data = {'MODEL NAME': ModelName,
            'MODEL TYPE': ModelType,
            'LAYERS': Layers,
            'LAYER COUNT': len(Layers),
            'CLASS COUNT': ClassCount,
            'MEMBRAN THRESHOLDS': MembranThresholds,
            'MEMBRAN POTENTIALS': MembranPotentials,
            'NORMALIZATION': Normalizations,
            'ACTIVATIONS': Activations,
            'NEURON COUNT': NeuronCount,
            'SYNAPSE COUNT': SynapseCount,
            'TEST ACCURACY': TestAcc,
            'SAVE DATE': datetime.now(),
            'WEIGHTS TYPE': WeightsType,
            'WEIGHTS FORMAT': WeightsFormat,
            'MODEL PATH': ModelPath
            }
    try:

        df = pd.DataFrame(data)

        if LogType == 'csv':

            df.to_csv(ModelPath + ModelName + '.csv', sep='\t', index=False)

        elif LogType == 'txt':

            df.to_csv(ModelPath + ModelName + '.txt', sep='\t', index=False)

        elif LogType == 'hdf5':

            df.to_hdf(ModelPath + ModelName + '.h5', key='data', mode='w')

    except:

        print(Fore.RED + "ERROR: Model log not saved. Check the log parameters. from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'
    try:

        if WeightsType == 'txt' and WeightsFormat == 'd':

            for i, w in enumerate(W):
                np.savetxt(ModelPath + ModelName + str(i + 1) + 'w.txt', w, fmt='%d')

        if WeightsType == 'txt' and WeightsFormat == 'f':

            for i, w in enumerate(W):
                np.savetxt(ModelPath + ModelName + str(i + 1) + 'w.txt', w, fmt='%f')

        if WeightsType == 'txt' and WeightsFormat == 'raw':

            for i, w in enumerate(W):
                np.savetxt(ModelPath + ModelName + str(i + 1) + 'w.txt', w)

        ###

        if WeightsType == 'npy' and WeightsFormat == 'd':

            for i, w in enumerate(W):
                np.save(ModelPath + ModelName + str(i + 1) + 'w.npy', w.astype(int))

        if WeightsType == 'npy' and WeightsFormat == 'f':

            for i, w in enumerate(W):
                np.save(ModelPath + ModelName + str(i + 1) + 'w.npy', w.astype(float))

        if WeightsType == 'npy' and WeightsFormat == 'raw':

            for i, w in enumerate(W):
                np.save(ModelPath + ModelName + str(i + 1) + 'w.npy', w)

        ###

        if WeightsType == 'mat' and WeightsFormat == 'd':

            for i, w in enumerate(W):
                w = {'w': w.astype(int)}
                io.savemat(ModelPath + ModelName + str(i + 1) + 'w.mat', w)

        if WeightsType == 'mat' and WeightsFormat == 'f':

            for i, w in enumerate(W):
                w = {'w': w.astype(float)}
                io.savemat(ModelPath + ModelName + str(i + 1) + 'w.mat', w)

        if WeightsType == 'mat' and WeightsFormat == 'raw':

            for i, w in enumerate(W):
                w = {'w': w}
                io.savemat(ModelPath + ModelName + str(i + 1) + 'w.mat', w)

    except:

        print(Fore.RED + "ERROR: Model weights not saved. Check the weight parameters. SaveFilePath example: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
        return 'e'
    print(df)
    message = (
        Fore.GREEN + "Model Saved Successfully\n" +
        Fore.MAGENTA + "Don't forget: if you want to load the model, the model log file and the weight files must be in the same directory." +
        Style.RESET_ALL
    )

    print(message)
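# Editorial usage sketch (hypothetical path): persisting the model as a txt log
# plus one raw .npy file per weight matrix:
#
#     SavePLAN('MyPLAN', 'PLAN', ['fex', 'cat'], 3, ['<', '=='], [0.7, 0],
#              ['y', 'n'], ['none', 'none'], TestAcc, 'txt', 'npy', 'raw',
#              'C:/Users/beydili/Desktop/denemePLAN/', TrainedWs)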

def LoadPLAN(ModelName,
             ModelPath,
             LogType,
             ):
    infoLoadPLAN = """
    Function to load a deep learning model.

    Arguments:
        ModelName (str): Name of the model.
        ModelPath (str): Path where the model is saved.
        LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').

    Returns:
        lists: W(list[num]), Layers, MembranThresholds, MembranPotentials, Normalizations, Activations
    """

    import pandas as pd
    import scipy.io as sio

    try:

        if LogType == 'csv':
            df = pd.read_csv(ModelPath + ModelName + '.' + LogType)

        if LogType == 'txt':
            df = pd.read_csv(ModelPath + ModelName + '.' + LogType, delimiter='\t')

        if LogType == 'hdf5':
            df = pd.read_hdf(ModelPath + ModelName + '.' + LogType)
    except:
        print(Fore.RED + "ERROR: Model Path error. Acceptable form: 'C:/Users/hasancanbeydili/Desktop/denemePLAN/' from: LoadPLAN" + infoLoadPLAN + Style.RESET_ALL)
        return 'e'

    ModelName = str(df['MODEL NAME'].iloc[0])
    Layers = df['LAYERS'].tolist()
    LayerCount = int(df['LAYER COUNT'].iloc[0])
    ClassCount = int(df['CLASS COUNT'].iloc[0])
    MembranThresholds = df['MEMBRAN THRESHOLDS'].tolist()
    MembranPotentials = df['MEMBRAN POTENTIALS'].tolist()
    Normalizations = df['NORMALIZATION'].tolist()
    Activations = df['ACTIVATIONS'].tolist()
    NeuronCount = int(df['NEURON COUNT'].iloc[0])
    SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
    TestAcc = float(df['TEST ACCURACY'].iloc[0])
    ModelType = str(df['MODEL TYPE'].iloc[0])
    WeightType = str(df['WEIGHTS TYPE'].iloc[0])
    WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
    ModelPath = str(df['MODEL PATH'].iloc[0])

    W = [0] * LayerCount

    if WeightType == 'txt':
        for i in range(LayerCount):
            W[i] = np.loadtxt(ModelPath + ModelName + str(i + 1) + 'w.txt')
    elif WeightType == 'npy':
        for i in range(LayerCount):
            W[i] = np.load(ModelPath + ModelName + str(i + 1) + 'w.npy')
    elif WeightType == 'mat':
        for i in range(LayerCount):
            W[i] = sio.loadmat(ModelPath + ModelName + str(i + 1) + 'w.mat')
    else:
        raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat'. from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
    print(Fore.GREEN + "Model loaded successfully" + Style.RESET_ALL)
    return W, Layers, MembranThresholds, MembranPotentials, Normalizations, Activations, df

def PredictFromDiscPLAN(Input, ModelName, ModelPath, LogType):
    infoPredictFromDiscPLAN = """
    Function to make a prediction using a divided pruning deep learning neural network (PLAN).

    Arguments:
        Input (list or ndarray): Input data for the model (single vector or single matrix).
        ModelName (str): Name of the model.
        ModelPath (str): Path where the model is saved.
        LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').

    Returns:
        ndarray: Output from the model.
    """
    W, Layers, MembranThresholds, MembranPotentials, Normalizations, Activations = LoadPLAN(ModelName, ModelPath,
                                                                                            LogType)[0:6]
    Wc = [0] * len(W)
    for i, w in enumerate(W):
        Wc[i] = np.copy(w)
    try:
        NeuralLayer = Input
        NeuralLayer = np.array(NeuralLayer)
        NeuralLayer = NeuralLayer.ravel()
        for index, Layer in enumerate(Layers):
            if Normalizations[index] == 'y':
                NeuralLayer = Normalization(NeuralLayer)
            if Activations[index] == 'relu':
                NeuralLayer = Relu(NeuralLayer)
            elif Activations[index] == 'sigmoid':
                NeuralLayer = Sigmoid(NeuralLayer)
            elif Activations[index] == 'softmax':
                NeuralLayer = Softmax(NeuralLayer)

            if Layers[index] == 'fex':
                NeuralLayer, useless = Fex(NeuralLayer, W[index],
                                           MembranThresholds[index],
                                           MembranPotentials[index])
            if Layers[index] == 'cat':
                NeuralLayer, useless = Cat(NeuralLayer, W[index],
                                           MembranThresholds[index],
                                           MembranPotentials[index],
                                           0)
    except:
        print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: PredictFromDiscPLAN" + infoPredictFromDiscPLAN + Style.RESET_ALL)
        return 'e'
    for i, w in enumerate(Wc):
        W[i] = np.copy(w)
    return NeuralLayer
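# Editorial usage sketch (hypothetical path): loading the saved model from disk
# and classifying a single sample:
#
#     Output = PredictFromDiscPLAN(x_test[0], 'MyPLAN',
#                                  'C:/Users/beydili/Desktop/denemePLAN/', 'txt')
#     PredictedClass = np.argmax(Output)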

def PredictFromRamPLAN(Input, Layers, MembranThresholds, MembranPotentials, Normalizations, Activations, W):
    infoPredictFromRamPLAN = """
    Function to make a prediction using a pruning learning artificial neural network (PLAN)
    from weights and parameters stored in memory.

    Arguments:
        Input (list or ndarray): Input data for the model (single vector or single matrix).
        Layers (list): Number and types of layers.
        MembranThresholds (list): MEMBRAN THRESHOLDS.
        MembranPotentials (list): MEMBRAN POTENTIALS.
        Normalizations (list): Whether to normalize at each layer ('y' or 'n').
        Activations (list): Activation functions for each layer.
        W (list of ndarrays): Weights of the model.

    Returns:
        ndarray: Output from the model.
    """

    Wc = [0] * len(W)
    for i, w in enumerate(W):
        Wc[i] = np.copy(w)
    try:
        NeuralLayer = Input
        NeuralLayer = np.array(NeuralLayer)
        NeuralLayer = NeuralLayer.ravel()
        for index, Layer in enumerate(Layers):
            if Normalizations[index] == 'y':
                NeuralLayer = Normalization(NeuralLayer)
            if Activations[index] == 'relu':
                NeuralLayer = Relu(NeuralLayer)
            elif Activations[index] == 'sigmoid':
                NeuralLayer = Sigmoid(NeuralLayer)
            elif Activations[index] == 'softmax':
                NeuralLayer = Softmax(NeuralLayer)

            if Layers[index] == 'fex':
                NeuralLayer, useless = Fex(NeuralLayer, W[index],
                                           MembranThresholds[index],
                                           MembranPotentials[index])
            if Layers[index] == 'cat':
                NeuralLayer, useless = Cat(NeuralLayer, W[index],
                                           MembranThresholds[index],
                                           MembranPotentials[index], 0)
    except:
        print(Fore.RED + "ERROR: Unexpected input or wrong model parameters. from: PredictFromRamPLAN." + infoPredictFromRamPLAN + Style.RESET_ALL)
        return 'e'
    for i, w in enumerate(Wc):
        W[i] = np.copy(w)
    return NeuralLayer

def AutoBalancer(TrainInputs, TrainLabels, ClassCount):
    infoAutoBalancer = """
    Function to balance the training data across the different classes.

    Arguments:
        TrainInputs (list): Input data for training.
        TrainLabels (list): Labels corresponding to the input data.
        ClassCount (int): Number of classes.

    Returns:
        tuple: A tuple containing the balanced input data and labels.
    """
    try:
        ClassIndices = {i: np.where(np.array(TrainLabels)[:, i] == 1)[0] for i in range(ClassCount)}
        ClassCounts = [len(ClassIndices[i]) for i in range(ClassCount)]

        if len(set(ClassCounts)) == 1:
            print(Fore.WHITE + "INFO: The training data is already balanced. from: AutoBalancer" + Style.RESET_ALL)
            return TrainInputs, TrainLabels

        MinCount = min(ClassCounts)

        BalancedIndices = []
        for i in range(ClassCount):
            if len(ClassIndices[i]) > MinCount:
                SelectedIndices = np.random.choice(ClassIndices[i], MinCount, replace=False)
            else:
                SelectedIndices = ClassIndices[i]
            BalancedIndices.extend(SelectedIndices)

        BalancedInputs = [TrainInputs[idx] for idx in BalancedIndices]
        BalancedLabels = [TrainLabels[idx] for idx in BalancedIndices]

        print(Fore.GREEN + "All training data successfully balanced from: " + str(len(TrainInputs)) + " to: " + str(len(BalancedInputs)) + ". from: AutoBalancer " + Style.RESET_ALL)
    except:
        print(Fore.RED + "ERROR: Inputs and labels must be the same length. Check the parameters." + infoAutoBalancer)
        return 'e'

    return BalancedInputs, BalancedLabels
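# Editorial usage sketch: undersampling every class to the size of the rarest one
# before training; assumes one-hot encoded labels:
#
#     x_train, y_train = AutoBalancer(x_train, y_train, ClassCount=3)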

def GetWeights():
    # Index of the weight list in the tuples returned by TrainPLAN / TestPLAN / LoadPLAN.
    return 0

def GetDf():
    # Index of the model log DataFrame in the tuple returned by LoadPLAN.
    return 6

def GetPreds():
    # Index of the predictions list in the tuples returned by TrainPLAN / TestPLAN.
    return 1

def GetAcc():
    # Index of the accuracy value in the tuples returned by TrainPLAN / TestPLAN.
    return 2
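# Editorial usage sketch: the four helpers above are index constants for
# unpacking the returned tuples, e.g.:
#
#     Out = TrainPLAN(...)
#     TrainedWs = Out[GetWeights()]  # index 0
#     TrainAcc = Out[GetAcc()]       # index 2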
@@ -0,0 +1,7 @@
Metadata-Version: 2.1
Name: pyerualjetwork
Version: 1.2.7
Summary: Advanced python deep learning library. 'ThresholdSigns' changed to 'MembranThresholds' and 'ThresholdValues' changed to 'MembranPotentials'. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
Author: Hasan Can Beydili
Author-email: tchasancan@gmail.com
Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -0,0 +1 @@

@@ -0,0 +1 @@
plan
@@ -0,0 +1,15 @@
from setuptools import setup, find_packages

# Setting Up

setup(

    name = "pyerualjetwork",
    version = "1.2.7",
    author = "Hasan Can Beydili",
    author_email = "tchasancan@gmail.com",
    description= "Advanced python deep learning library. 'ThresholdSigns' changed to 'MembranThresholds' and 'ThresholdValues' changed to 'MembranPotentials'. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)",
    packages = find_packages(),
    keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks']

    )