pyerualjetwork 2.5.8__py3-none-any.whl → 2.6.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plan/plan.py +113 -197
- pyerualjetwork-2.6.6.dist-info/METADATA +8 -0
- pyerualjetwork-2.6.6.dist-info/RECORD +6 -0
- pyerualjetwork-2.5.8.dist-info/METADATA +0 -8
- pyerualjetwork-2.5.8.dist-info/RECORD +0 -6
- {pyerualjetwork-2.5.8.dist-info → pyerualjetwork-2.6.6.dist-info}/WHEEL +0 -0
- {pyerualjetwork-2.5.8.dist-info → pyerualjetwork-2.6.6.dist-info}/top_level.txt +0 -0
plan/plan.py
CHANGED
```diff
@@ -6,25 +6,6 @@ Created on Tue Jun 18 23:32:16 2024
 @author: hasan
 """
 
-# optic
-
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Jun 21 05:21:35 2024
-
-@author: hasan
-"""
-
-# -*- coding: utf-8 -*-
-
-
-
-"""
-Created on Thu Jun 12 00:00:00 2024
-
-@author: hasan can beydili
-"""
 import pandas as pd
 import numpy as np
 import time
@@ -34,6 +15,7 @@ import math
 from scipy.special import expit, softmax
 import matplotlib.pyplot as plt
 import seaborn as sns
+from tqdm import tqdm
 
 # BUILD -----
 
```
```diff
@@ -43,11 +25,10 @@ def fit(
     y_train: List[Union[int, float]], # At least two.. and one hot encoded
     show_training,
     show_count= None,
-    val_count= None,
     val= None,
     x_val= None,
     y_val= None,
-
+    activation_potentiation=None # (float): Input activation_potentiation (optional)
 ) -> str:
 
     infoPLAN = """
@@ -58,10 +39,11 @@ def fit(
     y_train (list[num]): List of target labels. (one hot encoded)
     show_training (bool, str): True, None or'final'
     show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional)
+    val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None
     val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
     x_val (list[num]): List of validation data. (optional) Default: x_train
     y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: y_train
-
+    activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
     Returns:
     list([num]): (Weight matrices list, train_predictions list, Train_acc).
     error handled ?: Process status ('e')
@@ -78,18 +60,12 @@ def fit(
         x_val = x_train
         y_val = y_train
 
-    if val == True
-
-
-        val_count_copy = val_count
-
-    if val == True:
+    if val == True or val == 'final':
+
+        val_bar = tqdm(total=1, desc="Training / Validating Accuracy", ncols=120)
 
-        val_count = int(len(x_train) * val_count)
-
-        val_count_copy = val_count
 
-    if
+    if show_count == None:
 
         show_count = 10
 
@@ -109,25 +85,23 @@ def fit(
 
     neurons = [len(class_count), len(class_count)]
     layers = ['fex']
-    val_list = [
+    val_list = []
 
     x_train[0] = np.array(x_train[0])
     x_train[0] = x_train[0].ravel()
    x_train_size = len(x_train[0])
 
-
-        len(layers) - 1, len(class_count), neurons, x_train_size)
+    STPW = weight_identification(
+        len(layers) - 1, len(class_count), neurons, x_train_size) # STPW = SHORT TIME POTENTIATION WEIGHT
+
+    LTPW = [1] * len(STPW) # LTPW = LONG TIME POTENTIATION WEIGHT
 
-    trained_W = [1] * len(W)
-    print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
-    start_time = time.time()
     y = decode_one_hot(y_train)
 
     for index, inp in enumerate(x_train):
-
+
         progress = index / len(x_train) * 100
 
-        uni_start_time = time.time()
         inp = np.array(inp)
         inp = inp.ravel()
 
@@ -143,41 +117,37 @@ def fit(
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-
+                STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation)
 
-        for i, w in enumerate(
-
+        for i, w in enumerate(STPW):
+            LTPW[i] = LTPW[i] + w
 
 
         if val == True:
+
+            if show_training == True:
+                try:
+                    plt.close(fig)
 
-
-
-            val_count += val_count_copy
-
-            layers.append('cat')
-            trained_W.append(np.eye(len(class_count)))
-
-            validation_model = evaluate(x_val, y_val, None, trained_W)
-
-            layers.pop()
-            trained_W.pop()
-
-            val_acc = validation_model[get_acc()]
+                except:
+                    pass
 
-
+            validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
+
+            val_acc = validation_model[get_acc()]
 
+            val_list.append(val_acc)
+
+            if index == 0:
 
-
-
+                val_bar.update(val_acc)
+
+            if index != 0:
+
+                val_acc = val_acc - val_list[index - 1]
 
-
-
-            plt.xlabel('Learning Progress')
-            plt.ylabel('Accuracy')
-            plt.ylim(0, 1)
-            plt.draw()
-            plt.pause(0.1)
+                val_bar.update(val_acc)
+
 
         if show_training == True:
 
@@ -200,11 +170,11 @@ def fit(
 
                 if row != 0:
 
-                    mat =
+                    mat = LTPW[0][j,:].reshape(row, col)
                     suptitle_info = 'Neurons Learning Progress: % '
                     title_info = f'{j+1}. Neuron'
 
-                    mat =
+                    mat = LTPW[0][j,:].reshape(row, col)
 
                     ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
 
@@ -216,38 +186,23 @@ def fit(
 
                 else:
 
-                    mat =
+                    mat = LTPW[0]
                     ax.imshow(mat, interpolation='sinc', cmap='viridis')
                     suptitle_info = 'Weight Learning Progress: % '
                     title_info = 'Weight Matrix Of Fex Layer'
 
-
-
+
+
 
-
-
-
-
-
+            progress_status = f"{progress:.1f}"
+            fig.suptitle(suptitle_info + progress_status)
+            plt.draw()
+            plt.pause(0.1)
+
 
         W = weight_identification(
             len(layers) - 1, len(class_count), neurons, x_train_size)
 
-        uni_end_time = time.time()
-
-        calculating_est = round(
-            (uni_end_time - uni_start_time) * (len(x_train) - index), 3)
-
-        if calculating_est < 60:
-            print('\rest......(sec):', calculating_est, '\n', end="")
-
-        elif calculating_est > 60 and calculating_est < 3600:
-            print('\rest......(min):', calculating_est/60, '\n', end="")
-
-        elif calculating_est > 3600:
-            print('\rest......(h):', calculating_est/3600, '\n', end="")
-
-        print('\rTraining: ', index, "/", len(x_train), "\n", end="")
 
     if show_training == 'final':
 
@@ -255,7 +210,7 @@ def fit(
 
         for j in range(len(class_count)):
 
-            mat =
+            mat = LTPW[0][j,:].reshape(row, col)
 
             ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
             ax[j].set_aspect('equal')
@@ -270,26 +225,18 @@ def fit(
         plt.pause(0.1)
 
 
-
-
-
-
-
-
-    if calculating_est < 60:
-        print('Total training time(sec): ', calculating_est)
-
-    elif calculating_est > 60 and calculating_est < 3600:
-        print('Total training time(min): ', calculating_est/60)
-
-    elif calculating_est > 3600:
-        print('Total training time(h): ', calculating_est/3600)
+    if val == 'final':
+
+        validation_model = evaluate(x_val, y_val, None, LTPW, activation_potentiation, None)
+
+        val_acc = validation_model[get_acc()]
 
-
-
+        val_list.append(val_acc)
+
+        val_bar.update(val_acc)
 
 
-    return
+    return LTPW
 
 # FUNCTIONS -----
 
```
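Taken together, the fit hunks make three API changes: the val_count parameter is removed, val now also accepts 'final' (one validation pass at the end of training, reported through a tqdm bar instead of the old matplotlib accuracy plot), and activation_potentiation is new; the function now returns the LTPW weight list in place of the old trained_W. A minimal usage sketch against the 2.6.6 signature — the toy data, shapes, and threshold value are illustrative assumptions, not taken from the diff:

```python
import numpy as np
from plan import plan

# Hypothetical toy problem: four flattened samples, two classes, one-hot labels.
x_train = [np.random.rand(9) for _ in range(4)]
y_train = [[1, 0], [0, 1], [1, 0], [0, 1]]

# val='final' (new in 2.6.6) validates once after training;
# activation_potentiation thresholds inputs for binary injection.
LTPW = plan.fit(
    x_train, y_train,
    show_training=None,
    val='final',
    x_val=x_train, y_val=y_train,
    activation_potentiation=0.5,
)
```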
```diff
@@ -381,48 +328,48 @@ def fex(
     w, # num: Weight matrix of the neural network.
     is_training, # bool: Flag indicating if the function is called during training (True or False).
     Class, # int: Which class is, if training.
-
+    activation_potentiation # float or None: Input activation potentiation (optional)
 ) -> tuple:
     """
-    Applies feature extraction process to the input data using synaptic
+    Applies feature extraction process to the input data using synaptic potentiation.
 
     Args:
         Input (num): Input data.
         w (num): Weight matrix of the neural network.
         is_training (bool): Flag indicating if the function is called during training (True or False).
         Class (int): if is during training then which class(label) ? is isnt then put None.
-
+        activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
         tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
     """
 
-    if is_training == True and
+    if is_training == True and activation_potentiation == None:
 
         w[Class, :] = Input
 
         return w
 
-    elif is_training == True and
+    elif is_training == True and activation_potentiation != None:
 
 
-        Input[Input <
-        Input[Input >
+        Input[Input < activation_potentiation] = 0
+        Input[Input > activation_potentiation] = 1
 
         w[Class,:] = Input
 
         return w
 
-    elif is_training == False and
+    elif is_training == False and activation_potentiation == None:
 
         neural_layer = np.dot(w, Input)
 
         return neural_layer
 
-    elif is_training == False and
+    elif is_training == False and activation_potentiation != None:
 
-        Input[Input <
-        Input[Input >
+        Input[Input < activation_potentiation] = 0
+        Input[Input > activation_potentiation] = 1
 
         neural_layer = np.dot(w, Input)
 
```
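The restored conditionals spell out fex's four branches: whenever activation_potentiation is set, input values below the threshold are zeroed and values above it are clamped to 1 before the weight write or the dot product. A standalone NumPy sketch of just that binary-injection step, independent of the library:

```python
import numpy as np

def binary_inject(inp: np.ndarray, activation_potentiation: float) -> np.ndarray:
    # Mirrors the two restored lines in fex(): below the threshold -> 0,
    # above it -> 1; values exactly at the threshold pass through unchanged.
    out = inp.copy()
    out[out < activation_potentiation] = 0
    out[out > activation_potentiation] = 1
    return out

print(binary_inject(np.array([0.1, 0.5, 0.9]), 0.5))  # [0.  0.5 1. ]
```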
```diff
@@ -504,7 +451,8 @@ def evaluate(
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): (True or False)
     W, # list[num]: Weight matrix list of the neural network.
-
+    activation_potentiation=None, # activation_potentiation (float or None): Threshold value for comparison. (optional)
+    acc_bar_status = True
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -514,13 +462,14 @@ def evaluate(
     y_test (list[num]): Test labels.
     show_metrices (bool): (True or False)
     W (list[num]): Weight matrix list of the neural network.
-
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
+    acc_bar_status (bool): Loading bar for accuracy (True or None)
 
     Returns:
     tuple: A tuple containing the predicted labels and the accuracy of the model.
     """
 
-    layers = ['fex'
+    layers = ['fex']
 
     try:
         Wc = [0] * len(W) # Wc = Weight copy
@@ -531,9 +480,10 @@ def evaluate(
         for i, w in enumerate(W):
             Wc[i] = np.copy(w)
             print('\rCopying weights.....', i+1, '/', len(W), end="")
-
-
-
+
+        # print(Fore.GREEN + "\n\nTest Started with 0 ERROR\n" + Style.RESET_ALL)
+        if acc_bar_status == True:
+            acc_bar = tqdm(total=1, desc="Test Accuracy", ncols=75)
         for inpIndex, Input in enumerate(x_test):
             Input = np.array(Input)
             Input = Input.ravel()
@@ -545,9 +495,8 @@ def evaluate(
                 neural_layer = normalization(neural_layer)
 
                 if Layer == 'fex':
-                    neural_layer = fex(neural_layer, W[index], False, None,
-
-                    neural_layer = np.dot(W[index], neural_layer)
+                    neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
+
 
             for i, w in enumerate(Wc):
                 W[i] = np.copy(w)
@@ -556,56 +505,25 @@ def evaluate(
             if RealOutput == PredictedOutput:
                 true += 1
             acc = true / len(y_test)
-            if show_metrices == True:
-                acc_list.append(acc)
-            y_preds[inpIndex] = PredictedOutput
-
-            uni_end_time = time.time()
-
-            calculating_est = round(
-                (uni_end_time - uni_start_time) * (len(x_test) - inpIndex), 3)
 
-            if calculating_est < 60:
-                print('\rest......(sec):', calculating_est, '\n', end="")
-                print('\rTest accuracy: ', acc, "\n", end="")
-
-            elif calculating_est > 60 and calculating_est < 3600:
-                print('\rest......(min):', calculating_est/60, '\n', end="")
-                print('\rTest accuracy: ', acc, "\n", end="")
 
-
-
-
+            acc_list.append(acc)
+            y_preds[inpIndex] = PredictedOutput
+
+            if acc_bar_status == True:
+                if inpIndex == 0:
+                    acc_bar.update(acc)
+
+                else:
+                    acc = acc - acc_list[inpIndex - 1]
+                    acc_bar.update(acc)
+
         if show_metrices == True:
             plot_evaluate(y_test, y_preds, acc_list)
 
-
+
         for i, w in enumerate(Wc):
             W[i] = np.copy(w)
-
-        calculating_est = round(EndTime - start_time, 2)
-
-        print(Fore.GREEN + "\nTest Finished with 0 ERROR\n")
-
-        if calculating_est < 60:
-            print('Total testing time(sec): ', calculating_est)
-
-        elif calculating_est > 60 and calculating_est < 3600:
-            print('Total testing time(min): ', calculating_est/60)
-
-        elif calculating_est > 3600:
-            print('Total testing time(h): ', calculating_est/3600)
-
-        if acc >= 0.8:
-            print(Fore.GREEN + '\nTotal Test accuracy: ',
-                  acc, '\n' + Style.RESET_ALL)
-
-        elif acc < 0.8 and acc > 0.6:
-            print(Fore.MAGENTA + '\nTotal Test accuracy: ',
-                  acc, '\n' + Style.RESET_ALL)
-
-        elif acc <= 0.6:
-            print(Fore.RED + '\nTotal Test accuracy: ', acc, '\n' + Style.RESET_ALL)
 
     except:
 
```
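evaluate gains two keyword parameters: activation_potentiation, which must match the value used at fit time, and acc_bar_status, which replaces the removed per-sample timing prints with a tqdm accuracy bar. A call sketch continuing the earlier example — it assumes evaluate returns the same (W, y_preds, acc) tuple that multiple_evaluate does, which is consistent with get_acc() indexing element 2:

```python
# x_test / y_test are assumed to have the same layout as the training data.
W, y_preds, acc = plan.evaluate(
    x_test, y_test,
    show_metrices=False,          # parameter name spelled as in the source
    W=LTPW,                       # weights returned by fit()
    activation_potentiation=0.5,  # must match the value passed to fit()
    acc_bar_status=True,          # new in 2.6.6: tqdm accuracy bar
)
```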
```diff
@@ -621,7 +539,7 @@ def multiple_evaluate(
     y_test, # list[num]: Test labels.
     show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
     MW, # list[list[num]]: Weight matrix of the neural network.
-
+    activation_potentiation=None # (float or None): Threshold value for comparison. (optional)
 ) -> tuple:
     infoTestModel = """
     Tests the neural network model with the given test data.
@@ -668,9 +586,7 @@ def multiple_evaluate(
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None,
-            elif Layer == 'cat':
-                neural_layer = np.dot(W[index], neural_layer)
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
 
             output_layer += neural_layer
 
@@ -738,7 +654,7 @@ def multiple_evaluate(
 
     except:
 
-        print(Fore.RED + "ERROR: Testing model parameters like '
+        print(Fore.RED + "ERROR: Testing model parameters like 'activation_potentiation' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: evaluate" + infoTestModel + Style.RESET_ALL)
         return 'e'
 
     return W, y_preds, acc
@@ -753,11 +669,11 @@ def save_model(model_name,
                model_path,
                scaler_params,
                W,
-
+               activation_potentiation=None
                ):
 
     infosave_model = """
-    Function to save a
+    Function to save a potentiation learning model.
 
     Arguments:
     model_name (str): Name of the model.
@@ -769,7 +685,7 @@ def save_model(model_name,
     model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
     scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W: Weights of the model.
-
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
     str: Message indicating if the model was saved successfully or encountered an error.
@@ -778,7 +694,7 @@ def save_model(model_name,
     # Operations to be performed by the function will be written here
     pass
 
-    layers = ['fex'
+    layers = ['fex']
 
     if weights_type != 'txt' and weights_type != 'npy' and weights_type != 'mat':
         print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: save_model" +
@@ -819,7 +735,7 @@ def save_model(model_name,
             'WEIGHTS FORMAT': weights_format,
             'MODEL PATH': model_path,
             'STANDARD SCALER': scaler_params,
-            'ACTIVATION
+            'ACTIVATION POTENTIATION': activation_potentiation
             }
     try:
 
```
```diff
@@ -908,14 +824,14 @@ def load_model(model_name,
                model_path,
                ):
     infoload_model = """
-    Function to load a
+    Function to load a potentiation learning model.
 
     Arguments:
     model_name (str): Name of the model.
     model_path (str): Path where the model is saved.
 
     Returns:
-    lists: W(list[num]),
+    lists: W(list[num]), activation_potentiation, DataFrame of the model
     """
     pass
 
@@ -955,7 +871,7 @@ def load_model(model_name,
 def predict_model_ssd(Input, model_name, model_path):
 
     infopredict_model_ssd = """
-    Function to make a prediction using a divided
+    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
 
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
@@ -965,15 +881,15 @@ def predict_model_ssd(Input, model_name, model_path):
     """
     W, df = load_model(model_name, model_path)
 
-
+    activation_potentiation = str(df['ACTIVATION POTENTIATION'].iloc[0])
 
-    if
+    if activation_potentiation != 'nan':
 
-
+        activation_potentiation = float(activation_potentiation)
 
     else:
 
-
+        activation_potentiation = None
 
     try:
 
@@ -989,7 +905,7 @@ def predict_model_ssd(Input, model_name, model_path):
     non_scaled = True
 
 
-    layers = ['fex'
+    layers = ['fex']
 
     Wc = [0] * len(W)
     for i, w in enumerate(W):
@@ -1003,7 +919,7 @@ def predict_model_ssd(Input, model_name, model_path):
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None,
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
     except:
@@ -1015,17 +931,17 @@ def predict_model_ssd(Input, model_name, model_path):
     return neural_layer
 
 
-def predict_model_ram(Input, scaler_params, W,
+def predict_model_ram(Input, scaler_params, W, activation_potentiation=None):
 
     infopredict_model_ram = """
-    Function to make a prediction using a divided
+    Function to make a prediction using a divided potentiation learning artificial neural network (PLAN).
     from weights and parameters stored in memory.
 
     Arguments:
     Input (list or ndarray): Input data for the model (single vector or single matrix).
     scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
     W (list of ndarrays): Weights of the model.
-
+    activation_potentiation (float or None): Threshold value for comparison. (optional)
 
     Returns:
     ndarray: Output from the model.
@@ -1035,7 +951,7 @@ def predict_model_ram(Input, scaler_params, W, activation_potential=None):
 
     Input = standard_scaler(None, Input, scaler_params)
 
-    layers = ['fex'
+    layers = ['fex']
 
     Wc = [0] * len(W)
     for i, w in enumerate(W):
@@ -1049,7 +965,7 @@ def predict_model_ram(Input, scaler_params, W, activation_potential=None):
             neural_layer = normalization(neural_layer)
 
             if Layer == 'fex':
-                neural_layer = fex(neural_layer, W[index], False, None,
+                neural_layer = fex(neural_layer, W[index], False, None, activation_potentiation)
             elif Layer == 'cat':
                 neural_layer = np.dot(W[index], neural_layer)
 
```
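Both prediction paths now carry the threshold: predict_model_ram accepts it as a keyword, while predict_model_ssd recovers it from the saved model's 'ACTIVATION POTENTIATION' column (falling back to None when the stored value is NaN). A sketch of the two calls, continuing the earlier example; 'my_plan' and './' are placeholder values, and the second call assumes a model was previously written with save_model:

```python
sample = np.array(x_train[0])

# In-memory prediction: pass the same threshold used at fit() time.
out = plan.predict_model_ram(
    sample,
    scaler_params=None,  # None when no standard scaler was used
    W=LTPW,
    activation_potentiation=0.5,
)

# Disk-backed prediction: the threshold is read back from the saved model,
# so it is not a parameter here.
out = plan.predict_model_ssd(sample, model_name='my_plan', model_path='./')
```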
```diff
@@ -1695,4 +1611,4 @@ def get_preds():
 
 def get_acc():
 
-    return 2
+    return 2
```
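get_acc() and get_preds() return fixed tuple indices rather than values; callers use them to index evaluate's result, exactly as the new fit code does with validation_model[get_acc()]. Under the same tuple-layout assumption as above:

```python
# Positional call mirroring the diff's own
# evaluate(x_val, y_val, None, LTPW, activation_potentiation, None):
results = plan.evaluate(x_test, y_test, None, LTPW, 0.5, None)
acc = results[plan.get_acc()]  # get_acc() returns 2, the tuple slot for accuracy
```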
pyerualjetwork-2.6.6.dist-info/METADATA
ADDED
```diff
@@ -0,0 +1,8 @@
+Metadata-Version: 2.1
+Name: pyerualjetwork
+Version: 2.6.6
+Summary: Visual improvements, new option added for val paramater in fit function: 'final'. Removed: val_count parameter.
+Author: Hasan Can Beydili
+Author-email: tchasancan@gmail.com
+Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
+
```
pyerualjetwork-2.6.6.dist-info/RECORD
ADDED
```diff
@@ -0,0 +1,6 @@
+plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
+plan/plan.py,sha256=iICeYTx7-I4HRFqUFOsfuG1Nlp1vcYIWRiw3D960gGI,53301
+pyerualjetwork-2.6.6.dist-info/METADATA,sha256=cfxWuZznzuP91MhQ3Vrro8INQF7l0LW_5YaSAw0GyEM,338
+pyerualjetwork-2.6.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-2.6.6.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+pyerualjetwork-2.6.6.dist-info/RECORD,,
```
pyerualjetwork-2.5.8.dist-info/METADATA
REMOVED
```diff
@@ -1,8 +0,0 @@
-Metadata-Version: 2.1
-Name: pyerualjetwork
-Version: 2.5.8
-Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
-Author: Hasan Can Beydili
-Author-email: tchasancan@gmail.com
-Keywords: model evaluation,classifcation,pruning learning artficial neural networks
-
```
pyerualjetwork-2.5.8.dist-info/RECORD
REMOVED
```diff
@@ -1,6 +0,0 @@
-plan/__init__.py,sha256=gmaz8lnQfl18MbOQwabBUPmShajK5S99jfyY-hQe8tc,502
-plan/plan.py,sha256=3jSj2QXjLhinm_5BH0-pVhHP6mHyrXzLKm4m5GaV6Yw,56167
-pyerualjetwork-2.5.8.dist-info/METADATA,sha256=ILdOu0T__HUunUBsrZcnxEjeyAzP_jl122hO-OVPR38,357
-pyerualjetwork-2.5.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-2.5.8.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
-pyerualjetwork-2.5.8.dist-info/RECORD,,
```
{pyerualjetwork-2.5.8.dist-info → pyerualjetwork-2.6.6.dist-info}/WHEEL
File without changes

{pyerualjetwork-2.5.8.dist-info → pyerualjetwork-2.6.6.dist-info}/top_level.txt
File without changes