pyerualjetwork 4.3.8.dev15__py3-none-any.whl → 4.3.9__py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +2 -2
- pyerualjetwork/activation_functions_cuda.py +63 -114
- pyerualjetwork/data_operations_cuda.py +1 -1
- pyerualjetwork/fitness_functions.py +72 -0
- pyerualjetwork/fitness_functions_cuda.py +85 -0
- pyerualjetwork/model_operations.py +14 -14
- pyerualjetwork/model_operations_cuda.py +16 -17
- pyerualjetwork/plan.py +159 -382
- pyerualjetwork/plan_cuda.py +149 -387
- pyerualjetwork/planeat.py +24 -54
- pyerualjetwork/planeat_cuda.py +11 -47
- pyerualjetwork/visualizations.py +33 -30
- pyerualjetwork/visualizations_cuda.py +22 -24
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9.dist-info}/METADATA +3 -19
- pyerualjetwork-4.3.9.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.9.dist-info/top_level.txt +1 -0
- pyerualjetwork/loss_functions.py +0 -21
- pyerualjetwork/loss_functions_cuda.py +0 -21
- pyerualjetwork-4.3.8.dev15.dist-info/RECORD +0 -45
- pyerualjetwork-4.3.8.dev15.dist-info/top_level.txt +0 -2
- pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork_afterburner/activation_functions.py +0 -290
- pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
- pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
- pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork_afterburner/model_operations.py +0 -408
- pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
- pyerualjetwork_afterburner/parallel.py +0 -118
- pyerualjetwork_afterburner/plan.py +0 -432
- pyerualjetwork_afterburner/plan_cuda.py +0 -441
- pyerualjetwork_afterburner/planeat.py +0 -793
- pyerualjetwork_afterburner/planeat_cuda.py +0 -752
- pyerualjetwork_afterburner/ui.py +0 -22
- pyerualjetwork_afterburner/visualizations.py +0 -823
- pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
- {pyerualjetwork-4.3.8.dev15.dist-info → pyerualjetwork-4.3.9.dist-info}/WHEEL +0 -0
pyerualjetwork/plan.py
CHANGED
@@ -16,31 +16,19 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """

 import numpy as np
-import math

 ### LIBRARY IMPORTS ###
 from .ui import loading_bars, initialize_loading_bar
-from .data_operations import normalization,
-from .
-from .activation_functions import apply_activation, Softmax, all_activations
-from .metrics import metrics
+from .data_operations import normalization, batcher
+from .activation_functions import apply_activation, all_activations
 from .model_operations import get_acc, get_preds, get_preds_softmax
 from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
-    update_neural_web_for_fit,
-    plot_evaluate,
-    update_neuron_history,
-    initialize_visualization_for_fit,
-    update_weight_visualization_for_fit,
-    update_decision_boundary_for_fit,
-    update_validation_history_for_fit,
-    display_visualization_for_fit,
     display_visualizations_for_learner,
     update_history_plots_for_learner,
     initialize_visualization_for_learner,
-    update_neuron_history_for_learner
-    show
+    update_neuron_history_for_learner
 )

 ### GLOBAL VARIABLES ###
@@ -52,18 +40,9 @@ bar_format_learner = loading_bars()[1]
 def fit(
     x_train,
     y_train,
-    val=False,
-    val_count=None,
     activation_potentiation=['linear'],
-
-
-    show_training=None,
-    interval=100,
-    LTD=0,
-    decision_boundary_status=True,
-    train_bar=True,
-    auto_normalization=True,
-    neurons_history=False,
+    W=None,
+    auto_normalization=False,
     dtype=np.float32
 ):
     """
@@ -71,112 +50,41 @@ def fit(

     fit Args:

-        x_train (
+        x_train (aray-like[num]): List or numarray of input data.

-        y_train (
-
-        val (None or True): validation in training process ? None or True default: None (optional)
-
-        val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+        y_train (aray-like[num]): List or numarray of target labels. (one hot encoded)

         activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)

-
-
-        y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
-
-        show_training (bool, str): True or None default: None (optional)
-
-        LTD (int): Long Term Depression Hyperparameter for train PLAN neural network default: 0 (optional)
-
-        interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
-
-        decision_boundary_status (bool): If the visualization of validation and training history is enabled during training, should the decision boundaries also be visualized? True or False. Default is True. (optional)
+        W (numpy.ndarray): If you want to re-continue or update model

-
-
-        auto_normalization(bool): Normalization process during training. May effect training time and model quality. True or False. Default is True. (optional)
-
-        neurons_history (bool, optional): Shows the history of changes that neurons undergo during the CL (Cumulative Learning) stages. True or False. Default is False. (optional)
+        auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

     Returns:
-        numpyarray
+        numpyarray: (Weight matrix).
     """

-    # Pre-
+    # Pre-check
+
+    if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")

-
+    weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

-    if
-
-
-        train_progress = initialize_loading_bar(total=len(x_train), ncols=44, desc='Fitting', bar_format=bar_format_normal)
-
-    if len(x_train) != len(y_train):
-        raise ValueError("x_train and y_train must have the same length.")
-
-    if val and (x_val is None and y_val is None):
-        x_val, y_val = x_train, y_train
-
-    elif val and (x_val is not None and y_val is not None):
-        x_val = x_val.astype(dtype, copy=False)
-        y_val = y_val.astype(dtype, copy=False)
-
-    val_list = [] if val else None
-    val_count = val_count or 10
-    # Defining weights
-    STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # STPW = SHORT TIME POTENTIATION WEIGHT
-    LTPW = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) # LTPW = LONG TIME POTENTIATION WEIGHT
-    # Initialize visualization
-    vis_objects = initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train)
-
-    # Training process
-    for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False).ravel()
-        y_decoded = decode_one_hot(y_train[index])
-        # Weight updates
-        STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded, activation_potentiation=activation_potentiation, LTD=LTD)
-        LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-        if val and index != 0:
-            if index % math.ceil((val_count / len(x_train)) * 100) == 0:
-                val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
-                val_list.append(val_acc)
-
-                # Visualization updates
-                if show_training:
-                    update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-                    if decision_boundary_status:
-                        update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-                    update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
-                    update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
-                if neurons_history:
-                    update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
-        if train_bar:
-            train_progress.update(1)
-
-        STPW = np.ones((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False)
-
-    # Finalize visualization
-    if show_training:
-        ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
-        ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
-        ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
-        ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
-        show()
+    if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+    elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+    else: raise ValueError('normalization parameter only be True or False')

-
-        ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
-        show()
+    weight += y_train.T @ x_train

-    return normalization(
+    return normalization(weight, dtype=dtype)


-def learner(x_train, y_train, optimizer,
-            neural_web_history=False, show_current_activations=False, auto_normalization=
-            neurons_history=False, early_stop=False,
-            interval=33.33, target_acc=None,
+def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batch_size=1, pop_size=None,
+            neural_web_history=False, show_current_activations=False, auto_normalization=False,
+            neurons_history=False, early_stop=False, show_history=False,
+            interval=33.33, target_acc=None,
             start_this_act=None, start_this_W=None, dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
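The rewritten fit is no longer a per-sample STPW/LTPW loop: it applies the activation functions to the whole input matrix (optionally normalized), accumulates the one-hot targets against the activated inputs with a single matrix product, and returns the normalized weights. A minimal standalone sketch of that accumulation step, with hypothetical toy data:

```python
import numpy as np

# Minimal sketch of the new one-shot fit, assuming the default 'linear'
# activation and auto_normalization=False. Toy data is hypothetical.
x_train = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=np.float32)
y_train = np.array([[1, 0], [0, 1], [0, 1]], dtype=np.float32)  # one-hot labels

weight = np.zeros((y_train.shape[1], x_train.shape[1]), dtype=np.float32)
weight += y_train.T @ x_train  # each class row sums its own samples' inputs

print(weight)
# [[1. 0.]
#  [1. 2.]]
```

Because y_train is one-hot, y_train.T @ x_train adds each sample's activated features into the weight row of its class, which is the potentiation the old per-sample loop performed; passing an existing matrix through the new W parameter continues that accumulation instead of starting from zeros.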
@@ -194,14 +102,15 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

     optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-
-
-
-
+    optimizer = lambda *args, **kwargs: planeat.evolver(*args,
+                                                        activation_add_prob=0.85,
+                                                        strategy='aggressive',
+                                                        **kwargs)

     model = plan.learner(x_train,
                          y_train,
-                         optimizer
+                         optimizer,
+                         fitness,
                          fit_start=True,
                          strategy='accuracy',
                          show_history=True,
@@ -210,30 +119,42 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
                          interval=16.67)
     ```

-
+    fitness (function): Fitness function. We have 'hybrid_accuracy_confidence' function in the fitness_functions module. Use this: from pyerualjetwork import fitness_functions (and) fitness_function = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(*args, 'here give your fitness function hyperparameters for example: alpha=2, beta=1.5, lambda_div=0.05', **kwargs) Example:
+    ```python
+    fitness = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(*args, alpha=2, beta=1.5, lambda_div=0.05, **kwargs)

-
+    model = plan.learner(x_train,
+                         y_train,
+                         optimizer,
+                         fitness
+                         fit_start=True,
+                         strategy='accuracy',
+                         show_history=True,
+                         gen=15,
+                         batch_size=0.05,
+                         interval=16.67)
+    ```
+
+    fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True

     gen (int, optional): The generation count for genetic optimization.

     batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)

-
+    pop_size (int, optional): Population size of each generation. Default: count of activation functions

-
+    early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.

     show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False

     show_history (bool, optional): If True, displays the training history after optimization. Default is False.

-
+    auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

     interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.

     target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.

-    target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
     start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None

     start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
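The learner now scores genomes with a user-supplied fitness callable instead of the removed loss/strategy machinery. Judging by the call sites later in this diff, it is invoked positionally as fitness(y_true, softmax_predictions, diversity_score, accuracy). A hedged sketch of a compatible custom fitness; the weighting below is illustrative and is not the library's hybrid_accuracy_confidence implementation:

```python
import numpy as np

# Hedged sketch of a learner-compatible fitness callable. The positional
# order (y_true, softmax predictions, diversity score, accuracy) is inferred
# from the call sites in this diff; the weighting is purely illustrative.
def my_fitness(y_true, y_pred_softmax, diversity, accuracy, alpha=2.0, lambda_div=0.05):
    # y_true is available for label-aware terms (e.g. cross-entropy); unused here.
    confidence = float(np.mean(np.max(y_pred_softmax, axis=1)))  # mean top-class probability
    return (accuracy ** alpha) * confidence + lambda_div * diversity

# The same lambda pattern the docstring uses for the built-in fitness:
fitness = lambda *args, **kwargs: my_fitness(*args, alpha=2.0, lambda_div=0.05, **kwargs)
```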
@@ -249,56 +170,53 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

     """

-    from
+    from fitness_functions import diversity_score
+    from planeat import define_genomes

     data = 'Train'

-
+    except_this = ['spiral', 'circular']
+    activation_potentiation = [item for item in all_activations() if item not in except_this]
     activation_potentiation_len = len(activation_potentiation)

     # Pre-checks

+    if pop_size is None: pop_size = activation_potentiation_len
+
     x_train = x_train.astype(dtype, copy=False)
     y_train = optimize_labels(y_train, cuda=False)

+    if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
+
     if gen is None:
         gen = activation_potentiation_len

-    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
     if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
     if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

-    # Initialize progress bar
-    if batch_size == 1:
-        ncols = 76
-    else:
-        ncols = 89
-
     # Initialize variables
     best_acc = 0
-
-    best_recall = 0
-    best_precision = 0
+    best_fit = 0
     best_acc_per_gen_list = []
     postfix_dict = {}
-
+    fit_list = []
     target_pop = []

-    progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=
+    progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=74, bar_format=bar_format_learner)

-    if fit_start is False:
-        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=
-
-        if start_this_act is not None and start_this_W is not None:
-            weight_pop[0] = start_this_W
-            act_pop[0] = start_this_act
+    if fit_start is False or pop_size > activation_potentiation_len:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)

     else:
-        weight_pop = []
-        act_pop = []
+        weight_pop = [0] * pop_size
+        act_pop = [0] * pop_size
+
+    if start_this_act is not None and start_this_W is not None:
+        weight_pop[0] = start_this_W
+        act_pop[0] = start_this_act

     for i in range(gen):
         postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
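Note that the start_this_W / start_this_act seeding now sits outside the fit_start branch, so a resumed genome lands in slot 0 whether the population came from define_genomes or from the placeholder lists. A standalone sketch of that set-up logic, with hypothetical resume values:

```python
import numpy as np

# Standalone sketch of the population set-up above. The resumed weights and
# activation list are hypothetical stand-ins for values printed by a
# previous, interrupted learner run.
pop_size = 4
weight_pop = [0] * pop_size  # placeholder slots, filled during generation 0
act_pop = [0] * pop_size

start_this_W = np.ones((2, 3), dtype=np.float32)
start_this_act = ['tanh']

if start_this_act is not None and start_this_W is not None:
    weight_pop[0] = start_this_W  # genome 0 resumes the previous best
    act_pop[0] = start_this_act
```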
@@ -308,92 +226,66 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         progress.last_print_n = 0
         progress.update(0)

-        for j in range(
+        for j in range(pop_size):

             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)

-            if fit_start is True and i == 0:
-
-
-
+            if fit_start is True and i == 0 and j < activation_potentiation_len:
+                if start_this_act is not None and j == 0:
+                    pass
+                else:
+                    act_pop[j] = activation_potentiation[j]
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop[j] = W

-            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j],
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
             acc = model[get_acc()]

-
-
-            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
-
-                if strategy == 'precision':
-                    target_pop.append(precision_score)
-
-                    if i == 0 and j == 0:
-                        best_precision = precision_score
-
-                if strategy == 'recall':
-                    target_pop.append(recall_score)
-
-                    if i == 0 and j == 0:
-                        best_recall = recall_score
+            div_score = diversity_score(weight_pop[j])
+            fit_score = fitness(y_train_batch, model[get_preds_softmax()], div_score, acc)

-
-                    target_pop.append(f1_score)
+            target_pop.append(fit_score)

-
-                        best_f1 = f1_score
-
-            if ((strategy == 'accuracy' and acc >= best_acc) or
-                (strategy == 'f1' and f1_score >= best_f1) or
-                (strategy == 'precision' and precision_score >= best_precision) or
-                (strategy == 'recall' and recall_score >= best_recall)):
+            if fit_score >= best_fit:

                 best_acc = acc
-
+                best_fit = fit_score
+                best_weight = np.copy(weight_pop[j])
                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
+
                 best_model = model

                 final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

                 if batch_size == 1:
-                    postfix_dict[f"{data} Accuracy"] = best_acc
-
-                    postfix_dict[f"{data} Batch Accuracy"] = acc
-                progress.set_postfix(postfix_dict)
+                    postfix_dict[f"{data} Accuracy"] = np.round(best_acc, 4)
+                    progress.set_postfix(postfix_dict)

                 if show_current_activations:
                     print(f", Current Activations={final_activations}", end='')

-                if loss == 'categorical_crossentropy':
-                    train_loss = categorical_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
-                else:
-                    train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
-
                 if batch_size == 1:
-                    postfix_dict[
-
-
-                    postfix_dict[f"{data} Batch Loss"] = train_loss
-                progress.set_postfix(postfix_dict)
-                best_loss = train_loss
+                    postfix_dict["Fitness"] = np.round(fit_score, 4)
+                    progress.set_postfix(postfix_dict)
+                best_fit = fit_score

                 # Update visualizations during training
                 if show_history:
                     gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                    update_history_plots_for_learner(viz_objects, gen_list,
+                    update_history_plots_for_learner(viz_objects, gen_list, fit_list + [fit_score],
                                                      best_acc_per_gen_list + [best_acc], x_train, final_activations)

                 if neurons_history:
                     viz_objects['neurons']['artists'] = (
-                        update_neuron_history_for_learner(np.copy(
+                        update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
                                                           viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                                                           y_train[0], viz_objects['neurons']['artists'],
                                                           data=data, fig1=viz_objects['neurons']['fig'],
-                                                          acc=best_acc, loss=
+                                                          acc=best_acc, loss=fit_score)
                     )

                 if neural_web_history:
-                    art5_1, art5_2, art5_3 = draw_neural_web(W=
+                    art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
                                                              G=viz_objects['web']['G'], return_objs=True)
                     art5_list = [art5_1] + [art5_2] + list(art5_3.values())
                     viz_objects['web']['artists'].append(art5_list)
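diversity_score comes from the new fitness_functions module and its body is not part of this diff. As a reading aid, one plausible shape for such a measure is the mean pairwise distance between weight rows; the sketch below is illustrative only, not the library's implementation:

```python
import numpy as np

# Illustrative weight-diversity measure (NOT fitness_functions.diversity_score,
# whose implementation is not shown in this diff): mean pairwise Euclidean
# distance between the rows of the weight matrix.
def diversity_score_sketch(W):
    rows = W.reshape(W.shape[0], -1)
    n = rows.shape[0]
    if n < 2:
        return 0.0
    dists = np.linalg.norm(rows[:, None, :] - rows[None, :, :], axis=-1)
    return float(dists.sum() / (n * (n - 1)))  # diagonal contributes zero

print(diversity_score_sketch(np.eye(3, dtype=np.float32)))  # ~1.4142
```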
@@ -401,51 +293,46 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         # Check target accuracy
         if target_acc is not None and best_acc >= target_acc:
             progress.close()
-            train_model = evaluate(x_train, y_train, W=
-                                   activation_potentiation=final_activations
+            train_model = evaluate(x_train, y_train, W=best_weight,
+                                   activation_potentiation=final_activations)

-
-
-                                                      y_pred_batch=train_model[get_preds_softmax()])
-            else:
-                train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                 y_pred_batch=train_model[get_preds_softmax()])
+            div_score = diversity_score(best_weight)
+            fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])

             print('\nActivations: ', final_activations)
-            print(f'Train Accuracy
-            print(f'
-
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Fitness Value: ', fit_score, '\n')
+
+            postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
+            postfix_dict["Fitness"] = np.round(best_fit, 4)
+            progress.set_postfix(postfix_dict)
             # Display final visualizations
-            display_visualizations_for_learner(viz_objects,
-
-            return
-
-        # Check target loss
-        if target_loss is not None and best_loss <= target_loss:
-            progress.close()
-            train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                   activation_potentiation=final_activations, dtype=dtype)
-
-            if loss == 'categorical_crossentropy':
-                train_loss = categorical_crossentropy(y_true_batch=y_train,
-                                                      y_pred_batch=train_model[get_preds_softmax()])
-            else:
-                train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                 y_pred_batch=train_model[get_preds_softmax()])
-
-            print('\nActivations: ', final_activations)
-            print(f'Train Accuracy :', train_model[get_acc()])
-            print(f'Train Loss : ', train_loss, '\n')
-
-            # Display final visualizations
-            display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                               train_loss, y_train, interval)
-            return best_weights, best_model[get_preds()], best_acc, final_activations
+            display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                               fit_score, y_train, interval)
+            return best_weight, best_model[get_preds()], best_acc, final_activations

         progress.update(1)

-
-
+        if batch_size != 1:
+            train_model = evaluate(x_train, y_train, best_weight, final_activations)
+
+            div_score = diversity_score(best_weight)
+            fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+
+            print('\nActivations: ', final_activations)
+            print(f'Train Accuracy:', train_model[get_acc()])
+            print(f'Fitness Value: ', fit_score, '\n')
+
+            postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
+            postfix_dict["Fitness"] = np.round(best_fit, 4)
+            progress.set_postfix(postfix_dict)
+
+            best_acc_per_gen_list.append(train_model[get_acc()])
+            fit_list.append(fit_score)
+
+        else:
+            best_acc_per_gen_list.append(best_acc)
+            fit_list.append(best_fit)

         weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []
@@ -454,174 +341,64 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                train_model = evaluate(x_train, y_train, W=
-                                       activation_potentiation=final_activations
+                train_model = evaluate(x_train, y_train, W=best_weight,
+                                       activation_potentiation=final_activations)

-
-
-                                                          y_pred_batch=train_model[get_preds_softmax()])
-                else:
-                    train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                     y_pred_batch=train_model[get_preds_softmax()])
+                div_score = diversity_score(best_weight)
+                fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])

                 print('\nActivations: ', final_activations)
-                print(f'Train Accuracy
-                print(f'
+                print(f'Train Accuracy:', train_model[get_acc()])
+                print(f'Fitness Value: ', fit_score, '\n')

                 # Display final visualizations
-                display_visualizations_for_learner(viz_objects,
-
-                return
+                display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                   fit_score, y_train, interval)
+                return best_weight, best_model[get_preds()], best_acc, final_activations

     # Final evaluation
     progress.close()
-    train_model = evaluate(x_train, y_train, W=
-                           activation_potentiation=final_activations
+    train_model = evaluate(x_train, y_train, W=best_weight,
+                           activation_potentiation=final_activations)

-
-
-    else:
-        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+    div_score = diversity_score(best_weight)
+    fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])

     print('\nActivations: ', final_activations)
-    print(f'Train Accuracy
-    print(f'
+    print(f'Train Accuracy:', train_model[get_acc()])
+    print(f'Fitness Value: ', fit_score, '\n')

     # Display final visualizations
-    display_visualizations_for_learner(viz_objects,
-    return
-
-
-
-def feed_forward(
-            Input,               # list[num]: Input data.
-            w,                   # num: Weight matrix of the neural network.
-            is_training,         # bool: Flag indicating if the function is called during training (True or False).
-            activation_potentiation,
-            Class='?',           # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
-            LTD=0
-) -> tuple:
-    """
-    Applies feature extraction process to the input data using synaptic potentiation.
-
-    Args:
-        Input (num): Input data.
-        w (num): Weight matrix of the neural network.
-        is_training (bool): Flag indicating if the function is called during training (True or False).
-        Class (int): if is during training then which class(label) ? is isnt then put None.
-        # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
-
-    Returns:
-        tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
-        or
-        num: neural network output
-    """
-
-    Output = apply_activation(Input, activation_potentiation)
-
-    Input = Output
-
-    if is_training == True:
-
-        for _ in range(LTD):
-
-            depression_vector = np.random.rand(*Input.shape)
-
-            Input -= depression_vector
-
-        w[Class, :] = Input
-        return w
-
-    else:
-
-        neural_layer = np.dot(w, Input)
-
-        return neural_layer
+    display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, fit_score, y_train, interval)
+    return best_weight, best_model[get_preds()], best_acc, final_activations


 def evaluate(
-    x_test,
-    y_test,
-    W,
-    activation_potentiation=['linear']
-    loading_bar_status=True,  # Optionally show loading bar.
-    show_metrics=None,  # Optionally show metrics.
-    dtype=np.float32
+    x_test,
+    y_test,
+    W,
+    activation_potentiation=['linear']
 ) -> tuple:
     """
     Evaluates the neural network model using the given test data.

     Args:
-        x_test (np.ndarray): Test
-
-        y_test (np.ndarray): Test labels. one-hot encoded.
-
-        W (list[np.ndarray]): List of neural network weight matrices.
-
-        activation_potentiation (list): List of activation functions.
-
-        loading_bar_status (bool): Option to show a loading bar (optional).
-
-        show_metrics (bool): Option to show metrics (optional).
+        x_test (np.ndarray): Test data.

-
+        y_test (np.ndarray): Test labels (one-hot encoded).

+        W (np.ndarray): Neural net weight matrix.
+
+        activation_potentiation (list): Activation list. Default = ['linear'].
+
     Returns:
-        tuple:
+        tuple: Model (list).
     """
-    # Pre-checks
-
-    x_test = x_test.astype(dtype, copy=False)
-
-    if len(y_test[0]) < 256:
-        if y_test.dtype != np.uint8:
-            y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-    elif len(y_test[0]) <= 32767:
-        if y_test.dtype != np.uint16:
-            y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_test.dtype != np.uint32:
-            y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
-
-    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
-    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
-    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
-
-    true_predict = 0
-    acc_list = np.empty(len(x_test), dtype=dtype)
-
-    if loading_bar_status:
-        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
-
-    for inpIndex in range(len(x_test)):
-        Input = x_test[inpIndex].ravel()
-
-        neural_layer = Input
-
-        neural_layer = feed_forward(neural_layer, np.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
-
-        predict_probabilitys[inpIndex] = Softmax(neural_layer)
-
-        RealOutput = np.argmax(y_test[inpIndex])
-        real_classes[inpIndex] = RealOutput
-        PredictedOutput = np.argmax(neural_layer)
-        predict_classes[inpIndex] = PredictedOutput
-
-        if RealOutput == PredictedOutput:
-            true_predict += 1
-
-        acc = true_predict / (inpIndex + 1)
-        acc_list[inpIndex] = acc
-
-        if loading_bar_status:
-            loading_bar.update(1)
-            loading_bar.set_postfix({"Test Accuracy": acc})
-
-    if loading_bar_status:
-        loading_bar.close()

-
-
-
+    x_test = apply_activation(x_test, activation_potentiation)
+    result = x_test @ W.T
+    epsilon = 1e-10
+    accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
+    softmax_preds = np.exp(result) / (np.sum(np.exp(result), axis=1, keepdims=True) + epsilon)

-    return W,
+    return W, None, accuracy, None, None, softmax_preds