pyerualjetwork 4.2.0b6__tar.gz → 4.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/PKG-INFO +2 -2
- pyerualjetwork-4.2.2/pyerualjetwork/__init__.py +11 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/data_operations.py +4 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/help.py +2 -1
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/memory_operations.py +4 -4
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/model_operations.py +2 -2
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/plan.py +62 -99
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/plan_cuda.py +68 -113
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/planeat.py +26 -25
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/planeat_cuda.py +22 -22
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork.egg-info/PKG-INFO +2 -2
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/setup.py +2 -2
- pyerualjetwork-4.2.0b6/pyerualjetwork/__init__.py +0 -61
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/README.md +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/activation_functions.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/activation_functions_cuda.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/data_operations_cuda.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/loss_functions.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/loss_functions_cuda.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/metrics.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/metrics_cuda.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/model_operations_cuda.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/ui.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/visualizations.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/visualizations_cuda.py +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork.egg-info/SOURCES.txt +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/setup.cfg +0 -0
{pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.2.0b6
-Summary: PyerualJetwork is a machine learning library written in Python for professionals
+Version: 4.2.2
+Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
 Keywords: model evaluation,classification,potentiation learning artificial neural networks,NEAT,genetic algorithms,reinforcement learning,neural networks
pyerualjetwork-4.2.2/pyerualjetwork/__init__.py
@@ -0,0 +1,11 @@
+__version__ = "4.2.2"
+__update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
+
+def print_version(__version__):
+    print(f"PyerualJetwork Version {__version__}" + '\n')
+
+def print_update_notes(__update__):
+    print(f"Notes:\n{__update__}")
+
+print_version(__version__)
+print_update_notes(__update__)
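Because the new `__init__.py` calls `print_version` and `print_update_notes` at module level, the banner is emitted as a side effect of importing the package. A minimal sketch (the printed text follows directly from the file added above):

```python
# Importing the package executes the new __init__.py, which prints the version banner
# and the update notes once per interpreter session.
import pyerualjetwork

print(pyerualjetwork.__version__)  # "4.2.2"
```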
{pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/data_operations.py
@@ -66,9 +66,13 @@ def split(X, y, test_size, random_state=42, dtype=np.float32):
 
     Args:
         X (numpy.ndarray): Features data.
+
         y (numpy.ndarray): Labels data.
+
         test_size (float or int): Proportion or number of samples for the test subset.
+
         random_state (int or None): Seed for random state. Default: 42.
+
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
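For context, a hedged usage sketch of `split` with the signature shown in the hunk header; the return order is an assumption (mirroring scikit-learn's `train_test_split`) and is not visible in this diff:

```python
import numpy as np
from pyerualjetwork.data_operations import split

X = np.random.rand(100, 4).astype(np.float32)                  # placeholder features
y = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]  # placeholder one-hot labels

# Assumed return order: (x_train, x_test, y_train, y_test).
x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42, dtype=np.float32)
```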
{pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/memory_operations.py
@@ -34,13 +34,13 @@ def transfer_to_cpu(x, dtype=np.float32):
     The `transfer_to_cpu` function converts data to a specified data type on the CPU, handling memory constraints
     by batching the conversion process and ensuring complete GPU memory cleanup.
 
-
+    param x: Input data to transfer to CPU (CuPy array)
 
-
+    param dtype: Target NumPy dtype for the output array (default: np.float32)
 
-
+    return: NumPy array with the specified dtype
     """
-    from ui import loading_bars, initialize_loading_bar
+    from .ui import loading_bars, initialize_loading_bar
     try:
         if isinstance(x, np.ndarray):
             return x.astype(dtype) if x.dtype != dtype else x
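The switch to the relative import (`from .ui import ...`) fixes the module lookup when the package is used as an installed library. A hedged usage sketch, assuming CuPy and a CUDA device are available:

```python
import numpy as np
import cupy as cp
from pyerualjetwork.memory_operations import transfer_to_cpu

x_gpu = cp.random.rand(4096, 32)                  # data resident on the GPU
x_cpu = transfer_to_cpu(x_gpu, dtype=np.float32)  # batched conversion back to a host NumPy array
assert isinstance(x_cpu, np.ndarray)
```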
{pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/model_operations.py
@@ -334,8 +334,8 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
     ndarray: Output from the model.
     """
 
-    from
-    from
+    from data_operations import standard_scaler
+    from plan import feed_forward
 
     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
 
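A hedged sketch of calling `predict_model_ram` with only the parameters visible in the truncated hunk header; the weight matrix and the `['linear']` activation list are placeholders, not values taken from this diff:

```python
import numpy as np
from pyerualjetwork.model_operations import predict_model_ram

x_sample = np.random.rand(4).astype(np.float32)  # placeholder input vector
W = np.random.rand(3, 4).astype(np.float32)      # placeholder weight matrix (classes x features)

# The activation_potentiation default is truncated above; ['linear'] is an assumption.
output = predict_model_ram(x_sample, W, scaler_params=None, activation_potentiation=['linear'])
```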
{pyerualjetwork-4.2.0b6 → pyerualjetwork-4.2.2}/pyerualjetwork/plan.py
@@ -16,7 +16,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import numpy as np
-from colorama import Fore
 import math
 
 ### LIBRARY IMPORTS ###
@@ -174,11 +173,11 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer,
+def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-            interval=33.33, target_acc=None, target_loss=None,
-
+            interval=33.33, target_acc=None, target_loss=None,
+            start_this_act=None, start_this_W=None, dtype=np.float32):
    """
    Optimizes the activation functions for a neural network by leveraging train data to find
    the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
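For orientation, a hedged sketch of calling the reworked signature; it mirrors the example embedded in the docstring of the next hunk, and all hyperparameter values are illustrative only:

```python
import numpy as np
from pyerualjetwork import plan, planeat

x_train = np.random.rand(100, 4).astype(np.float32)                  # placeholder features
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]  # placeholder one-hot labels

# planeat.evolver supplies the genetic optimization step; the kwargs shown are examples.
genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                            activation_add_prob=0.85,
                                                            strategy='aggressive',
                                                            **kwargs)

model = plan.learner(x_train, y_train,
                     optimizer=genetic_optimizer,
                     fit_start=True,   # mandatory in 4.2.2
                     gen=15,
                     batch_size=0.05,
                     dtype=np.float32)
```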
@@ -195,34 +194,34 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
    ```python
-   genetic_optimizer = lambda *args, **kwargs: planeat.
+   genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                    activation_add_prob=0.85,
-                                                   mutations=
-                                                   strategy='
+                                                   mutations=True,
+                                                   strategy='aggressive',
                                                    **kwargs)
 
    model = plan.learner(x_train,
                         y_train,
                         optimizer=genetic_optimizer,
+                        fit_start=True,
                         strategy='accuracy',
                         show_history=True,
-
+                        gen=15,
+                        batch_size=0.05,
                         interval=16.67)
    ```
 
-
-
-   y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
+   fit_start (bool): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. The fit_start parameter is MANDATORY and must be provided.
 
    strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
    gen (int, optional): The generation count for genetic optimization.
 
-   batch_size (float, optional): Batch size is used in the prediction process to receive
+   batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
-
+   early_stop (bool, optional): If True, implements early stopping during training.(If accuracy not improves in two gen stops learning.) Default is False.
 
-
+   auto_normalization (bool, optional): IMPORTANT: auto_nomralization parameter works only if fit_start is True. Do not change this value if fit_start is False, because it doesnt matter.) If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
    show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
@@ -235,10 +234,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
 
    target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-   except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-   only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
    start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
 
@@ -247,8 +242,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
 
    neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-   target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
 
    dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
@@ -257,37 +250,25 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
    """
 
-
+    from .planeat import define_genomes
 
-
+    data = 'Train'
 
+    activation_potentiation = all_activations()
+    activation_potentiation_len = len(activation_potentiation)
+
    # Pre-checks
 
    x_train = x_train.astype(dtype, copy=False)
    y_train = optimize_labels(y_train, cuda=False)
 
-    if x_test is None and y_test is None:
-        x_test = x_train
-        y_test = y_train
-        data = 'Train'
-    else:
-        x_test = x_test.astype(dtype, copy=False)
-        y_test = optimize_labels(y_test, cuda=False)
-        data = 'Test'
-
-    # Filter activation functions
-    if only_this is not None:
-        activation_potentiation = only_this
-    if except_this is not None:
-        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
    if gen is None:
-        gen =
+        gen = activation_potentiation_len
 
    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+    if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
+    if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
 
-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
@@ -298,33 +279,27 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    ncols = 89
 
    # Initialize variables
-
-
-
-
-        best_acc = 0
-    else:
-        act_pop.append(start_this_act)
-        weight_pop.append(start_this_W)
-
-        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-        model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
-
-        if loss == 'categorical_crossentropy':
-            test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-        else:
-            test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-        best_acc = model[get_acc()]
-        best_loss = test_loss
-
+    best_acc = 0
+    best_f1 = 0
+    best_recall = 0
+    best_precision = 0
    best_acc_per_gen_list = []
    postfix_dict = {}
    loss_list = []
    target_pop = []
 
-    progress = initialize_loading_bar(total=
+    progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    if fit_start is False:
+        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)
+
+        if start_this_act is not None and start_this_W is not None:
+            weight_pop[0] = start_this_W
+            act_pop[0] = start_this_act
+
+    else:
+        weight_pop = []
+        act_pop = []
 
    for i in range(gen):
        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -334,22 +309,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
        progress.last_print_n = 0
        progress.update(0)
 
-        for j in range(
+        for j in range(activation_potentiation_len):
 
-
+            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
-            if i == 0:
+            if fit_start is True and i == 0:
                act_pop.append(activation_potentiation[j])
-                W = fit(
+                W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
                weight_pop.append(W)
-
-            model = evaluate(
+
+            model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
            acc = model[get_acc()]
 
            if strategy == 'accuracy': target_pop.append(acc)
 
            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                precision_score, recall_score, f1_score = metrics(
+                precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
 
                if strategy == 'precision':
                    target_pop.append(precision_score)
@@ -391,22 +366,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                print(f", Current Activations={final_activations}", end='')
 
            if loss == 'categorical_crossentropy':
-
+                train_loss = categorical_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
            else:
-
+                train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
 
            if batch_size == 1:
-                postfix_dict[f"{data} Loss"] =
-                best_loss =
+                postfix_dict[f"{data} Loss"] = train_loss
+                best_loss = train_loss
            else:
-                postfix_dict[f"{data} Batch Loss"] =
+                postfix_dict[f"{data} Batch Loss"] = train_loss
            progress.set_postfix(postfix_dict)
-            best_loss =
+            best_loss = train_loss
 
            # Update visualizations during training
            if show_history:
                gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [
+                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                 best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
            if neurons_history:
@@ -415,7 +390,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                    viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                    y_train[0], viz_objects['neurons']['artists'],
                    data=data, fig1=viz_objects['neurons']['fig'],
-                    acc=best_acc, loss=
+                    acc=best_acc, loss=train_loss)
                )
 
            if neural_web_history:
@@ -440,13 +415,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                print('\nActivations: ', final_activations)
                print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                if data == 'Test':
-                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                                   train_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations
 
            # Check target loss
@@ -465,13 +437,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                print('\nActivations: ', final_activations)
                print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                if data == 'Test':
-                    print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                    print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
                # Display final visualizations
                display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                                   train_loss, y_train, interval)
                return best_weights, best_model[get_preds()], best_acc, final_activations
 
            progress.update(1)
@@ -479,7 +448,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)
 
-        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False),
+        weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
        target_pop = []
 
        # Early stopping check
@@ -499,13 +468,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
            print('\nActivations: ', final_activations)
            print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
            print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-            if data == 'Test':
-                print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
            # Display final visualizations
            display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-
+                                               train_loss, y_train, interval)
            return best_weights, best_model[get_preds()], best_acc, final_activations
 
    # Final evaluation
@@ -518,15 +484,12 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
    else:
        train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
-    print('\nActivations: ',
+    print('\nActivations: ', final_activations)
    print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
    print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-    if data == 'Test':
-        print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-        print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
 
    # Display final visualizations
-    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+    display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
    return best_weights, best_model[get_preds()], best_acc, final_activations
 
 
@@ -621,12 +584,12 @@ def evaluate(
    if y_test.dtype != np.uint32:
        y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
 
-    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=
-    real_classes = np.empty(len(x_test), dtype=
-    predict_classes = np.empty(len(x_test), dtype=
+    predict_probabilitys = np.empty((len(x_test), W.shape[0]), dtype=dtype)
+    real_classes = np.empty(len(x_test), dtype=y_test.dtype)
+    predict_classes = np.empty(len(x_test), dtype=y_test.dtype)
 
    true_predict = 0
-    acc_list = np.empty(len(x_test), dtype=
+    acc_list = np.empty(len(x_test), dtype=dtype)
 
    if loading_bar_status:
        loading_bar = initialize_loading_bar(total=len(x_test), ncols=64, desc='Testing', bar_format=bar_format_normal)
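Closing with a hedged sketch of `evaluate` as it is invoked inside `learner` in this diff; the import path of the `get_acc()`/`get_preds()` index helpers and the placeholder data are assumptions:

```python
import numpy as np
from pyerualjetwork.plan import evaluate, get_acc, get_preds  # helper import path assumed

x_test = np.random.rand(50, 4).astype(np.float32)                 # placeholder features
y_test = np.eye(3, dtype=np.uint32)[np.random.randint(0, 3, 50)]  # placeholder one-hot labels
W = np.random.rand(3, 4).astype(np.float32)                       # placeholder weight matrix

model = evaluate(x_test, y_test, W=W, loading_bar_status=True,
                 activation_potentiation=['linear'], dtype=np.float32)
accuracy = model[get_acc()]       # indexed the same way learner() does above
predictions = model[get_preds()]  # predicted classes
```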
|