pyerualjetwork-4.0.5-py3-none-any.whl
- pyerualjetwork/__init__.py +71 -0
- pyerualjetwork/activation_functions.py +367 -0
- pyerualjetwork/activation_functions_cuda.py +363 -0
- pyerualjetwork/data_operations.py +439 -0
- pyerualjetwork/data_operations_cuda.py +463 -0
- pyerualjetwork/help.py +16 -0
- pyerualjetwork/loss_functions.py +21 -0
- pyerualjetwork/loss_functions_cuda.py +21 -0
- pyerualjetwork/metrics.py +190 -0
- pyerualjetwork/metrics_cuda.py +165 -0
- pyerualjetwork/model_operations.py +408 -0
- pyerualjetwork/model_operations_cuda.py +414 -0
- pyerualjetwork/plan.py +681 -0
- pyerualjetwork/plan_cuda.py +677 -0
- pyerualjetwork/planeat.py +734 -0
- pyerualjetwork/planeat_cuda.py +736 -0
- pyerualjetwork/ui.py +22 -0
- pyerualjetwork/visualizations.py +799 -0
- pyerualjetwork/visualizations_cuda.py +799 -0
- pyerualjetwork-4.0.5.dist-info/METADATA +95 -0
- pyerualjetwork-4.0.5.dist-info/RECORD +23 -0
- pyerualjetwork-4.0.5.dist-info/WHEEL +5 -0
- pyerualjetwork-4.0.5.dist-info/top_level.txt +1 -0
pyerualjetwork/plan.py
ADDED
@@ -0,0 +1,681 @@
# -*- coding: utf-8 -*-
"""

MAIN MODULE FOR PLAN

PLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PLAN/PLAN.pdf
ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf

@author: Hasan Can Beydili
@YouTube: https://www.youtube.com/@HasanCanBeydili
@Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
@Instagram: https://www.instagram.com/canbeydilj/
@contact: tchasancan@gmail.com
"""

import numpy as np
from colorama import Fore

### LIBRARY IMPORTS ###
from .ui import loading_bars, initialize_loading_bar
from .data_operations import normalization, decode_one_hot, batcher
from .loss_functions import binary_crossentropy, categorical_crossentropy
from .activation_functions import apply_activation, Softmax, all_activations
from .metrics import metrics
from .model_operations import get_acc, get_preds, get_preds_softmax
from .visualizations import (
    draw_neural_web,
    plot_evaluate,
    neuron_history,
    initialize_visualization_for_fit,
    update_weight_visualization_for_fit,
    update_decision_boundary_for_fit,
    update_validation_history_for_fit,
    display_visualization_for_fit,
    display_visualizations_for_learner,
    update_history_plots_for_learner,
    initialize_visualization_for_learner
)

### GLOBAL VARIABLES ###
bar_format_normal = loading_bars()[0]
bar_format_learner = loading_bars()[1]

# BUILD -----

def fit(
    x_train,
    y_train,
    val=False,
    val_count=None,
    activation_potentiation=['linear'],
    x_val=None,
    y_val=None,
    show_training=None,
    interval=100,
    LTD=0,
    decision_boundary_status=True,
    train_bar=True,
    auto_normalization=True,
    neurons_history=False,
    dtype=np.float32
):
    """
    Creates a model by fitting the training data.

    fit Args:

        x_train (list[num]): List or numpy array of input data.

        y_train (list[num]): List or numpy array of target labels (one-hot encoded).

        val (bool): Run validation during training? True or False. default: False (optional)

        val_count (None or int): After how many learned samples should an accuracy check be performed? default: 10, meaning roughly every 10 steps (optional)

        activation_potentiation (list): Activation function parameters for deeper PLAN networks. For more information, run: plan.activations_list() default: ['linear'] (optional)

        x_val (list[num]): List of validation data. default: x_train (optional)

        y_val (list[num]): List of validation target labels (one-hot encoded). default: y_train (optional)

        show_training (bool, str): True or None. default: None (optional)

        LTD (int): Long Term Depression hyperparameter for training the PLAN neural network. default: 0 (optional)

        interval (float, int): Frame delay (milliseconds) for the Training Report (show_training=True). This parameter affects Training Report performance; lower values are more demanding on low-end PCs (33.33 = 30 FPS, 16.67 = 60 FPS). default: 100 (optional)

        decision_boundary_status (bool): If visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. default: True (optional)

        train_bar (bool): Show a training loading bar? True or False. default: True (optional)

        auto_normalization (bool): Apply normalization during training. May affect training time and model quality. True or False. default: True (optional)

        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. default: False (optional)

        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)

    Returns:
        numpy.ndarray: Trained weight matrix.
    """
    # Pre-checks

    x_train = x_train.astype(dtype, copy=False)

    # Choose the smallest unsigned integer dtype that can hold the labels.
    if len(y_train[0]) < 256:
        if y_train.dtype != np.uint8:
            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
    elif len(y_train[0]) <= 32767:
        if y_train.dtype != np.uint16:
            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
    else:
        if y_train.dtype != np.uint32:
            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)

    if train_bar and val:
        train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
    elif train_bar and not val:
        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)

    if len(x_train) != len(y_train):
        raise ValueError("x_train and y_train must have the same length.")

    if val and (x_val is None and y_val is None):
        x_val, y_val = x_train, y_train

    elif val and (x_val is not None and y_val is not None):
        x_val = x_val.astype(dtype, copy=False)
        y_val = y_val.astype(dtype, copy=False)

    val_list = [] if val else None
    val_count = val_count or 10
    # Defining weights
    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)   # STPW = SHORT TERM POTENTIATION WEIGHT
    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)  # LTPW = LONG TERM POTENTIATION WEIGHT
    # Initialize visualization
    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)

    # Decode the one-hot labels once for the whole training set
    y_decoded = decode_one_hot(y_train)

    # Training process
    for index, inp in enumerate(x_train):
        inp = np.array(inp, copy=False).ravel()
        # Weight updates
        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW

        # Visualization updates
        if show_training:
            update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
            if decision_boundary_status:
                update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
            update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
        if train_bar:
            train_progress.update(1)

        # Reset the short-term weights for the next sample
        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)

    # Finalize visualization
    if show_training:
        display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)

    return normalization(LTPW, dtype=dtype)

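# ---------------------------------------------------------------------------
# Usage sketch for fit() (illustrative only, not part of the module; the
# arrays below are hypothetical example data and assume one-hot labels):
#
#   import numpy as np
#   from pyerualjetwork import plan
#
#   x_train = np.random.rand(100, 4).astype(np.float32)                 # 100 samples, 4 features
#   y_train = np.eye(3, dtype=np.uint8)[np.random.randint(0, 3, 100)]   # 3 classes, one-hot
#
#   W = plan.fit(x_train, y_train, activation_potentiation=['linear'])
#   # W has shape (num_classes, num_features) == (3, 4)
# ---------------------------------------------------------------------------
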
def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', batch_size=1,
            neural_web_history=False, show_current_activations=False, auto_normalization=True,
            neurons_history=False, patience=None, depth=None, early_shifting=False,
            early_stop=False, loss='categorical_crossentropy', show_history=False,
            interval=33.33, target_acc=None, target_loss=None, except_this=None,
            only_this=None, start_this=None, dtype=np.float32):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset.

    Args:

        x_train (array-like): Training input data.

        y_train (array-like): Labels for training data, one-hot encoded.

        x_test (array-like, optional): Test input data (to improve next-generation generalization). If no test data is given, train-feedback learning is used instead.

        y_test (array-like, optional): Test labels (to improve next-generation generalization). If no test data is given, train-feedback learning is used instead.

        strategy (str, optional): Learning strategy (options: 'accuracy', 'loss', 'f1', 'precision', 'recall', 'adaptive_accuracy', 'adaptive_loss', 'all'):
            'accuracy': maximizes test accuracy during learning.
            'f1': maximizes test f1 score during learning.
            'precision': maximizes test precision score during learning.
            'recall': maximizes test recall during learning.
            'loss': minimizes test loss during learning.
            'adaptive_accuracy': compares the current accuracy against past accuracy over the number of depths given by the patience value; if no improvement is observed, it adapts by switching to the 'loss' strategy and continues learning by minimizing loss.
            'adaptive_loss': follows the 'loss' strategy until the loss reaches or falls below the value given by the patience parameter, then automatically switches to the 'accuracy' strategy and begins maximizing accuracy.
            'all': maximizes all test scores and minimizes test loss; the strongest and most robust strategy.
            Default is 'accuracy'.

        patience ((int, float), optional): Patience value for the adaptive strategies. Default for 'adaptive_accuracy': 5. Default for 'adaptive_loss': 0.150.

        depth (int, optional): The depth of the PLAN neural network's aggregation layers.

        batch_size (float, optional): Batch size used in the prediction process to receive test feedback, by splitting the test data into chunks and selecting activations on randomly chosen partitions. This reduces computational cost and time while still covering the entire test set across random selections, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1 (100% of the test set).

        auto_normalization (bool, optional): Setting auto_normalization=False gives faster training times and, for some datasets, much better accuracy. Default is True.

        early_shifting (int, optional): Early shifting checks whether test accuracy improves after a given number of activation attempts within a depth. If there's no improvement, it automatically shifts to the next depth. Basically: if no progress, move on. Default is False.

        early_stop (bool, optional): If True, applies early stopping during training (stops learning if test accuracy does not improve across two depths). Default is False.

        show_current_activations (bool, optional): Whether to display the activations selected by the current strategy during learning (True or False). This is very useful if you want to cancel the learning process and resume later: after canceling, you need the live training activations in order to pass them to the 'start_this' parameter. Default is False.

        show_history (bool, optional): If True, displays the training history after optimization. Default is False.

        loss (str, optional): For visualization and monitoring only; PLAN neural networks don't need a loss function during training (unless strategy is 'loss'). Options: 'categorical_crossentropy' or 'binary_crossentropy'. Default is 'categorical_crossentropy'.

        interval (int, optional): The frame interval for evaluations displayed during training (33.33 = 30 FPS, 16.67 = 60 FPS). Default is 33.33.

        target_acc (float, optional): Target accuracy at which to stop training early. Default is None.

        target_loss (float, optional): Target loss at which to stop training early. Default is None.

        except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For available activation functions, run: plan.activations_list())

        only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For available activation functions, run: plan.activations_list())

        start_this (list, optional): To resume a previously canceled or interrupted training run from where it left off, or to continue from that point with a different strategy, pass the list of activation functions selected so far. Default is None.

        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.

        neural_web_history (bool, optional): Draws the history of the neural web. Default is False.

        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)

    Returns:
        tuple: Model parameters: [Weight matrix, Predictions, Test accuracy, [Activation functions]].

    """
    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include the activation function ['circular'] in the 'except_this' parameter unless absolutely necessary, as it can significantly prolong the process. from: learner\n" + Fore.RESET)

    activation_potentiation = all_activations()

    # Pre-checks

    x_train = x_train.astype(dtype, copy=False)

    # Choose the smallest unsigned integer dtype that can hold the labels.
    if len(y_train[0]) < 256:
        if y_train.dtype != np.uint8:
            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
    elif len(y_train[0]) <= 32767:
        if y_train.dtype != np.uint16:
            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
    else:
        if y_train.dtype != np.uint32:
            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)

    if x_test is not None:
        x_test = x_test.astype(dtype, copy=False)

        if len(y_test[0]) < 256:
            if y_test.dtype != np.uint8:
                y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
        elif len(y_test[0]) <= 32767:
            if y_test.dtype != np.uint16:
                y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
        else:
            if y_test.dtype != np.uint32:
                y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)

    if x_test is None and y_test is None:
        x_test = x_train
        y_test = y_train
        data = 'Train'
    else:
        data = 'Test'

    if early_shifting != False:
        shift_patience = early_shifting

    # Strategy initialization
    if strategy == 'adaptive_accuracy':
        strategy = 'accuracy'
        adaptive = True
        if patience is None:
            patience = 5
    elif strategy == 'adaptive_loss':
        strategy = 'loss'
        adaptive = True
        if patience is None:
            patience = 0.150
    else:
        adaptive = False

    # Filter activation functions
    if only_this is not None:
        activation_potentiation = only_this
    if except_this is not None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
    if depth is None:
        depth = len(activation_potentiation)

    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

    # Initialize progress bar
    if batch_size == 1:
        ncols = 76
    else:
        ncols = 89
    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)

    # Initialize variables
    activations = []
    if start_this is None:
        best_activations = []
        best_acc = 0
    else:
        best_activations = start_this
        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
        W = fit(x_train, y_train, activation_potentiation=best_activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=best_activations, dtype=dtype)

        if loss == 'categorical_crossentropy':
            test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
        else:
            test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])

        best_acc = model[get_acc()]
        best_loss = test_loss

    best_acc_per_depth_list = []
    postfix_dict = {}
    loss_list = []

    for i in range(depth):
        postfix_dict["Depth"] = str(i+1) + '/' + str(depth)
        progress.set_postfix(postfix_dict)

        progress.n = 0
        progress.last_print_n = 0
        progress.update(0)

        for j in range(len(activation_potentiation)):
            # Candidate = best activations found so far + one new activation
            for k in range(len(best_activations)):
                activations.append(best_activations[k])

            activations.append(activation_potentiation[j])

            x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
            W = fit(x_train, y_train, activation_potentiation=activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
            model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)

            acc = model[get_acc()]

            if strategy == 'loss' or strategy == 'all':
                if loss == 'categorical_crossentropy':
                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
                else:
                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])

                if i == 0 and j == 0 and start_this is None:
                    best_loss = test_loss

            if strategy == 'f1' or strategy == 'precision' or strategy == 'recall' or strategy == 'all':
                precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])

                if strategy == 'precision' or strategy == 'all':
                    if i == 0 and j == 0:
                        best_precision = precision_score

                if strategy == 'recall' or strategy == 'all':
                    if i == 0 and j == 0:
                        best_recall = recall_score

                if strategy == 'f1' or strategy == 'all':
                    if i == 0 and j == 0:
                        best_f1 = f1_score

            if early_shifting != False:
                if acc <= best_acc:
                    early_shifting -= 1
                    if early_shifting == 0:
                        early_shifting = shift_patience
                        break

            if ((strategy == 'accuracy' and acc >= best_acc) or
                (strategy == 'loss' and test_loss <= best_loss) or
                (strategy == 'f1' and f1_score >= best_f1) or
                (strategy == 'precision' and precision_score >= best_precision) or
                (strategy == 'recall' and recall_score >= best_recall) or
                (strategy == 'all' and f1_score >= best_f1 and acc >= best_acc and
                 test_loss <= best_loss and precision_score >= best_precision and
                 recall_score >= best_recall)):

                current_best_activation = activation_potentiation[j]
                best_acc = acc

                if batch_size == 1:
                    postfix_dict[f"{data} Accuracy"] = best_acc
                else:
                    postfix_dict[f"{data} Batch Accuracy"] = acc
                progress.set_postfix(postfix_dict)

                final_activations = activations

                if show_current_activations:
                    print(f", Current Activations={final_activations}", end='')

                best_weights = W
                best_model = model

                if strategy != 'loss':
                    if loss == 'categorical_crossentropy':
                        test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
                    else:
                        test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])

                if batch_size == 1:
                    postfix_dict[f"{data} Loss"] = test_loss
                    best_loss = test_loss
                else:
                    postfix_dict[f"{data} Batch Loss"] = test_loss
                progress.set_postfix(postfix_dict)
                best_loss = test_loss

                # Update visualizations during training
                if show_history:
                    depth_list = range(1, len(best_acc_per_depth_list) + 2)
                    update_history_plots_for_learner(viz_objects, depth_list, loss_list + [test_loss],
                                                     best_acc_per_depth_list + [best_acc], x_train, final_activations)

                if neurons_history:
                    viz_objects['neurons']['artists'] = (
                        neuron_history(np.copy(best_weights), viz_objects['neurons']['ax'],
                                       viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                                       y_train[0], viz_objects['neurons']['artists'],
                                       data=data, fig1=viz_objects['neurons']['fig'],
                                       acc=best_acc, loss=test_loss)
                    )

                if neural_web_history:
                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
                                                             G=viz_objects['web']['G'], return_objs=True)
                    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
                    viz_objects['web']['artists'].append(art5_list)

            # Check target accuracy
            if target_acc is not None and best_acc >= target_acc:
                progress.close()
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations)

                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                          y_pred_batch=train_model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])

                print('\nActivations: ', final_activations)
                print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
                if data == 'Test':
                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                                   test_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations

            # Check target loss
            if target_loss is not None and best_loss <= target_loss:
                progress.close()
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations)

                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                          y_pred_batch=train_model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])

                print('\nActivations: ', final_activations)
                print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
                if data == 'Test':
                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                                   test_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations

            progress.update(1)
            activations = []

        best_activations.append(current_best_activation)
        best_acc_per_depth_list.append(best_acc)
        loss_list.append(best_loss)

        # Check adaptive strategy conditions
        if adaptive == True and strategy == 'accuracy' and i + 1 >= patience:
            check = best_acc_per_depth_list[-patience:]
            if all(x == check[0] for x in check):
                strategy = 'loss'
                adaptive = False
        elif adaptive == True and strategy == 'loss' and best_loss <= patience:
            strategy = 'accuracy'
            adaptive = False

        # Early stopping check
        if early_stop == True and i > 0:
            if best_acc_per_depth_list[i] == best_acc_per_depth_list[i-1]:
                progress.close()
                train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                       activation_potentiation=final_activations)

                if loss == 'categorical_crossentropy':
                    train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                          y_pred_batch=train_model[get_preds_softmax()])
                else:
                    train_loss = binary_crossentropy(y_true_batch=y_train,
                                                     y_pred_batch=train_model[get_preds_softmax()])

                print('\nActivations: ', final_activations)
                print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
                if data == 'Test':
                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
                                                   test_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations

    # Final evaluation
    progress.close()
    train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                           activation_potentiation=final_activations)

    if loss == 'categorical_crossentropy':
        train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
    else:
        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

    print('\nActivations: ', final_activations)
    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
    if data == 'Test':
        print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
        print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

    # Display final visualizations
    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations

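# ---------------------------------------------------------------------------
# Usage sketch for learner() (illustrative only; the array shapes follow the
# fit() sketch above and the chosen parameter values are hypothetical):
#
#   W, preds, acc, acts = plan.learner(x_train, y_train, x_test, y_test,
#                                      strategy='accuracy', depth=2,
#                                      batch_size=0.5)
#   # `acts` is the selected activation list; it can be passed back via
#   # `start_this=acts` to resume an interrupted search.
# ---------------------------------------------------------------------------
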
def feed_forward(
    Input,                    # list[num]: Input data.
    w,                        # num: Weight matrix of the neural network.
    is_training,              # bool: Flag indicating whether the function is called during training (True or False).
    activation_potentiation,  # list: Activation potentiation list for deep PLAN.
    Class='?',                # int: The class of the current sample, if training.
    LTD=0
) -> tuple:
    """
    Applies the feature extraction process to the input data using synaptic potentiation.

    Args:
        Input (num): Input data.
        w (num): Weight matrix of the neural network.
        is_training (bool): Flag indicating whether the function is called during training (True or False).
        Class (int): The class (label) of the sample during training; otherwise leave unset.
        activation_potentiation (list): Activation list for deep PLAN. default: ['linear'] (optional)

    Returns:
        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
        or
        num: neural network output
    """

    Output = apply_activation(Input, activation_potentiation)

    Input = Output

    if is_training == True:

        # Long Term Depression: randomly depress the activated input LTD times
        for _ in range(LTD):

            depression_vector = np.random.rand(*Input.shape)

            Input -= depression_vector

        w[Class, :] = Input
        return w

    else:

        neural_layer = np.dot(w, Input)

        return neural_layer

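# ---------------------------------------------------------------------------
# Semantics sketch for feed_forward() (illustrative only; toy 2-class,
# 3-feature setup). In training mode the activated input is written into the
# weight row of its class; in inference mode the output is simply w @ input.
#
#   w = np.ones((2, 3), dtype=np.float32)
#   w = feed_forward(np.array([0.2, 0.5, 0.3], dtype=np.float32), w,
#                    is_training=True, Class=1, activation_potentiation=['linear'])
#   # w[1, :] now holds the (linear) activation of the input
#
#   out = feed_forward(np.array([0.2, 0.5, 0.3], dtype=np.float32), w,
#                      is_training=False, activation_potentiation=['linear'])
#   # out == w.dot(input): a length-2 class score vector
# ---------------------------------------------------------------------------
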
def evaluate(
    x_test,                              # NumPy array: Test input data.
    y_test,                              # NumPy array: Test labels.
    W,                                   # List of NumPy arrays: Neural network weight matrices.
    activation_potentiation=['linear'],  # List of activation functions.
    loading_bar_status=True,             # Optionally show a loading bar.
    show_metrics=None,                   # Optionally show metrics.
    dtype=np.float32
) -> tuple:
    """
    Evaluates the neural network model using the given test data.

    Args:
        x_test (np.ndarray): Test input data.

        y_test (np.ndarray): Test labels, one-hot encoded.

        W (list[np.ndarray]): List of neural network weight matrices.

        activation_potentiation (list): List of activation functions.

        loading_bar_status (bool): Option to show a loading bar (optional).

        show_metrics (bool): Option to show metrics (optional).

        dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!]

    Returns:
        tuple: Weight matrix, predicted classes, final accuracy, and the softmax prediction probabilities.
    """
    # Pre-checks

    x_test = x_test.astype(dtype, copy=False)

    # Choose the smallest unsigned integer dtype that can hold the labels.
    if len(y_test[0]) < 256:
        if y_test.dtype != np.uint8:
            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
    elif len(y_test[0]) <= 32767:
        if y_test.dtype != np.uint16:
            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
    else:
        if y_test.dtype != np.uint32:
            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)

    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=np.float32)
    real_classes = np.empty(len(x_test), dtype=np.int32)
    predict_classes = np.empty(len(x_test), dtype=np.int32)

    true_predict = 0
    acc_list = np.empty(len(x_test), dtype=np.float32)

    if loading_bar_status:
        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)

    for inpIndex in range(len(x_test)):
        Input = x_test[inpIndex].ravel()

        neural_layer = Input

        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)

        predict_probabilitys[inpIndex] = Softmax(neural_layer)

        RealOutput = np.argmax(y_test[inpIndex])
        real_classes[inpIndex] = RealOutput
        PredictedOutput = np.argmax(neural_layer)
        predict_classes[inpIndex] = PredictedOutput

        if RealOutput == PredictedOutput:
            true_predict += 1

        # Running accuracy over the samples seen so far
        acc = true_predict / (inpIndex + 1)
        acc_list[inpIndex] = acc

        if loading_bar_status:
            loading_bar.update(1)
            loading_bar.set_postfix({"Test Accuracy": acc})

    if loading_bar_status:
        loading_bar.close()

    if show_metrics:
        # Plot the evaluation metrics
        plot_evaluate(x_test, y_test, predict_classes, acc_list, W=np.copy(W), activation_potentiation=activation_potentiation)

    return W, predict_classes, acc_list[-1], None, None, predict_probabilitys
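
# ---------------------------------------------------------------------------
# End-to-end sketch combining fit() and evaluate() (illustrative only;
# x_test/y_test follow the same shapes as the fit() sketch above):
#
#   W = plan.fit(x_train, y_train)
#   results = plan.evaluate(x_test, y_test, W=W, show_metrics=True)
#   test_preds = results[get_preds()]            # same index helpers the
#   test_acc = results[get_acc()]                # module uses internally
#   test_probs = results[get_preds_softmax()]
# ---------------------------------------------------------------------------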
|