pyerualjetwork 4.1.8__py3-none-any.whl → 4.1.8b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/plan.py +62 -33
- pyerualjetwork/plan_cuda.py +41 -34
- pyerualjetwork/planeat.py +1 -1
- pyerualjetwork/planeat_cuda.py +9 -7
- pyerualjetwork/visualizations_cuda.py +1 -1
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/RECORD +10 -10
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -48,7 +48,7 @@ for package_name in package_names:
 
 print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.8"
+__version__ = "4.1.8b0"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
pyerualjetwork/plan.py
CHANGED
@@ -24,7 +24,6 @@ from .loss_functions import binary_crossentropy, categorical_crossentropy
 from .activation_functions import apply_activation, Softmax, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
-from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -103,6 +102,8 @@ def fit(
     Returns:
     numpyarray([num]): (Weight matrix).
     """
+
+    from model_operations import get_acc
 
     # Pre-checks
 
@@ -172,7 +173,7 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
@@ -191,9 +192,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     y_train (array-like): Labels for training data. one-hot encoded.
 
-    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizator = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
+    genetic_optimizator = lambda *args, **kwargs: planeat.evolve(*args,
                                                                activation_add_prob=0.85,
                                                                mutations=False,
                                                                strategy='cross_over',
@@ -201,7 +202,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     model = plan.learner(x_train,
                          y_train,
-                         optimizer=genetic_optimizer,
+                         optimizator=genetic_optimizator,
                          strategy='accuracy',
                          show_history=True,
                          target_acc=0.94,
@@ -255,37 +256,52 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     """
 
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
 
     # Pre-checks
 
     x_train = x_train.astype(dtype, copy=False)
-    y_train = optimize_labels(y_train, cuda=False)
+
+    if len(y_train[0]) < 256:
+        if y_train.dtype != np.uint8:
+            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
+    elif len(y_train[0]) <= 32767:
+        if y_train.dtype != np.uint16:
+            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
+    else:
+        if y_train.dtype != np.uint32:
+            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
+
+    if x_test is not None:
+        x_test = x_test.astype(dtype, copy=False)
+
+        if len(y_test[0]) < 256:
+            if y_test.dtype != np.uint8:
+                y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
+        elif len(y_test[0]) <= 32767:
+            if y_test.dtype != np.uint16:
+                y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
+        else:
+            if y_test.dtype != np.uint32:
+                y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
 
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
-        x_test = x_test.astype(dtype, copy=False)
-        y_test = optimize_labels(y_test, cuda=False)
         data = 'Test'
 
     # Filter activation functions
-    if only_this
+    if only_this != None:
        activation_potentiation = only_this
-    if except_this
+    if except_this != None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
     if gen is None:
        gen = len(activation_potentiation)
 
-    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
-
-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
@@ -297,14 +313,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     # Initialize variables
     act_pop = []
-    weight_pop = []
-
     if start_this_act is None and start_this_W is None:
         best_acc = 0
     else:
         act_pop.append(start_this_act)
-        weight_pop.append(start_this_W)
-
         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
 
         model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
@@ -320,8 +332,33 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
+    act_pop = []
+    weight_pop = []
     target_pop = []
 
+    for i in range(len(activation_potentiation)):
+        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
+
+        if i == 0 and start_this_act is not None:
+            act_pop[0] = activation_potentiation[i]
+            weight_pop[0] = W
+        else:
+            act_pop.append(activation_potentiation[i])
+            weight_pop.append(W)
+
+        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype)
+
+        if strategy == 'accuracy': target_pop.append(model[get_acc()])
+        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+
+            if strategy == 'precision': target_pop.append(precision_score)
+            if strategy == 'recall': target_pop.append(recall_score)
+            if strategy == 'f1': target_pop.append(f1_score)
+
+    weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, 0, np.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
@@ -331,19 +368,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         progress.n = 0
         progress.last_print_n = 0
         progress.update(0)
+
+        weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
 
         for j in range(len(activation_potentiation)):
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            if i == 0:
-                act_pop.append(activation_potentiation[j])
-                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                weight_pop.append(W)
-
             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
+
             acc = model[get_acc()]
-
             if strategy == 'accuracy': target_pop.append(acc)
 
             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
@@ -369,7 +403,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
             if ((strategy == 'accuracy' and acc >= best_acc) or
                 (strategy == 'f1' and f1_score >= best_f1) or
-                (strategy == 'precision' and precision_score >= best_precision) or
+                (strategy == 'precision' and precision_score >= best_precision) or
                 (strategy == 'recall' and recall_score >= best_recall)):
 
                 best_acc = acc
@@ -377,8 +411,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 final_activations = act_pop[j]
                 best_model = model
 
-                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
-
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
@@ -477,9 +509,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
 
-        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
-        target_pop = []
-
         # Early stopping check
         if early_stop == True and i > 0:
            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
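The uint8/uint16/uint32 checks introduced above inline the label-dtype downcasting that the removed optimize_labels import used to provide: one-hot label arrays are shrunk to the smallest unsigned integer type that fits the class count. A standalone sketch of that selection logic (the helper name downcast_onehot_labels is ours, not part of the library):

```python
import numpy as np

def downcast_onehot_labels(y):
    # Width of a one-hot row = number of classes, as in the diff's len(y_train[0]) checks.
    n_classes = len(y[0])
    if n_classes < 256:
        target = np.uint8
    elif n_classes <= 32767:
        target = np.uint16
    else:
        target = np.uint32
    # Only cast when needed, mirroring the dtype != target guards in the diff.
    return y if y.dtype == target else np.array(y, copy=False).astype(target, copy=False)
```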
pyerualjetwork/plan_cuda.py
CHANGED
@@ -14,7 +14,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
-import numpy as np
 from colorama import Fore
 import math
 
@@ -25,7 +24,7 @@ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
 from .activation_functions_cuda import apply_activation, Softmax, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
-from .memory_operations import transfer_to_gpu, transfer_to_cpu
+from .memory_operations import transfer_to_gpu, transfer_to_cpu
 from .visualizations_cuda import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -108,6 +107,8 @@ def fit(
     numpyarray([num]): (Weight matrix).
     """
     # Pre-checks
+
+    from memory_operations import transfer_to_gpu, transfer_to_cpu
 
     if train_bar and val:
         train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
@@ -188,7 +189,7 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
@@ -207,9 +208,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     y_train (array-like): Labels for training data.
 
-    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+    genetic_optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args,
                                                                     activation_add_prob=0.85,
                                                                     mutations=False,
                                                                     strategy='cross_over',
@@ -217,7 +218,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     model = plan_cuda.learner(x_train,
                               y_train,
-                              optimizer=genetic_optimizer,
+                              optimizator=genetic_optimizator,
                               strategy='accuracy',
                               show_history=True,
                               target_acc=0.94,
@@ -273,19 +274,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     """
 
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
 
-    y_train = optimize_labels(y_train, cuda=True)
-
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
         data = 'Test'
-        y_test = optimize_labels(y_test, cuda=True)
 
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=dtype)
@@ -308,39 +306,31 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     else:
         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
-    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
-
     # Filter activation functions
-    if only_this
+    if only_this != None:
        activation_potentiation = only_this
-    if except_this
+    if except_this != None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
     if gen is None:
        gen = len(activation_potentiation)
 
-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
     # Initialize progress bar
     if batch_size == 1:
-        ncols =
+        ncols = 76
     else:
-        ncols =
+        ncols = 89
 
     # Initialize variables
     act_pop = []
-    weight_pop = []
-
     if start_this_act is None and start_this_W is None:
         best_acc = 0
     else:
         act_pop.append(start_this_act)
-        weight_pop.append(start_this_W)
-
         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+
     model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
 
     if loss == 'categorical_crossentropy':
@@ -354,8 +344,35 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
+    act_pop = []
+    weight_pop = []
     target_pop = []
 
+    for i in range(len(activation_potentiation)):
+        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
+
+        if i == 0 and start_this_act is not None:
+            act_pop[0] = activation_potentiation[i]
+            weight_pop[0] = W
+        else:
+            act_pop.append(activation_potentiation[i])
+            weight_pop.append(W)
+
+        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype, memory=memory)
+        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype, memory=memory)
+
+        if strategy == 'accuracy': target_pop.append(model[get_acc()])
+        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+
+            if strategy == 'precision': target_pop.append(precision_score)
+            if strategy == 'recall': target_pop.append(recall_score)
+            if strategy == 'f1': target_pop.append(f1_score)
+
+
+
+    weight_pop, act_pop = optimizator(cp.array(weight_pop, dtype=dtype), act_pop, 0, cp.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
@@ -369,12 +386,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         for j in range(len(activation_potentiation)):
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            if i == 0:
-                act_pop.append(activation_potentiation[j])
-                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                weight_pop.append(W)
-
             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
 
             acc = model[get_acc()]
@@ -411,8 +422,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 final_activations = act_pop[j]
                 best_model = model
 
-                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
-
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
@@ -510,9 +519,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
-
-        weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
-        target_pop = []
 
         # Early stopping check
         if early_stop == True and i > 0:
@@ -642,6 +648,7 @@ def evaluate(
     Returns:
     tuple: Model (list).
     """
+    from memory_operations import transfer_to_cpu, transfer_to_gpu
 
     if memory == 'gpu':
         x_test = transfer_to_gpu(x_test, dtype=dtype)
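Both learner docstrings above describe the same calling convention for the renamed optimizator argument: a thin lambda around planeat(_cuda).evolve that pins the NEAT hyperparameters while learner supplies the positional arguments. A consolidated sketch of that pattern for the CUDA path, assuming x_train/y_train are your own arrays; the hyperparameter values are illustrative, taken from the docstring example:

```python
from pyerualjetwork import plan_cuda, planeat_cuda

# learner() calls the optimizator with its own positional arguments
# (weights, activations, generation, fitness); the lambda fixes the
# evolutionary hyperparameters up front.
genetic_optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args,
                                                                  activation_add_prob=0.85,
                                                                  mutations=False,
                                                                  strategy='cross_over',
                                                                  **kwargs)

model = plan_cuda.learner(x_train,
                          y_train,
                          optimizator=genetic_optimizator,
                          strategy='accuracy',
                          show_history=True,
                          target_acc=0.94)
```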
pyerualjetwork/planeat.py
CHANGED
@@ -185,7 +185,7 @@ Notes:
 
 Example:
     ```python
-    weights, activation_potentiations =
+    weights, activation_potentiations = learner(weights, activation_potentiations, 1, fitness, info=True, strategy='cross_over', policy='normal_selective')
     ```
 
 - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -186,7 +186,7 @@ Notes:
 
 Example:
     ```python
-    weights, activation_potentiations =
+    weights, activation_potentiations = learner(weights, activation_potentiations, 1, fitness, info=True, strategy='cross_over', policy='normal_selective')
    ```
 
 - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
@@ -230,15 +230,17 @@ Example:
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")
 
+    sort_indices = cp.argsort(fitness)
+
 ### FITNESS LIST IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
-    if target_fitness == 'max': sort_indices =
-    elif target_fitness == 'min': sort_indices =
+    if target_fitness == 'max': sort_indices = np.argsort(fitness)
+    elif target_fitness == 'min': sort_indices = np.argsort(-fitness)
 
     fitness = fitness[sort_indices]
     weights = weights[sort_indices]
 
-    activation_potentiations = [activation_potentiations[
+    activation_potentiations = [activation_potentiations[i] for i in sort_indices]
 
 ### GENOMES ARE DIVIDED INTO TWO GROUPS: GOOD GENOMES AND BAD GENOMES:
 
@@ -353,9 +355,9 @@ Example:
     print("  ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
     print("*** Performance ***")
-    print("
-    print(" MEAN REWARD: ", str(
-    print(" MIN REWARD: ", str(
+    print(" MAX REWARD: ", str(round(max(fitness), 2)))
+    print(" MEAN REWARD: ", str(round(cp.mean(fitness), 2)))
+    print(" MIN REWARD: ", str(round(min(fitness), 2)) + '\n')
 
     print(" BEST GENOME INDEX: ", str(len(weights)-1))
     print(" NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
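The sorting change above keeps three parallel structures in step: fitness and weights are fancy-indexed with the argsort result, while the per-genome activation lists are reordered with a comprehension over the same indices. A small self-contained NumPy illustration of the pattern (toy data, not the library's API):

```python
import numpy as np

fitness = np.array([0.3, 0.9, 0.1])
weights = np.array([[1.0], [2.0], [3.0]])        # one weight matrix per genome
activations = [['relu'], ['tanh'], ['sigmoid']]  # one activation list per genome

# Ascending sort puts the best genome last, matching evolve()'s note that the
# genome at the last index is "the king of the previous generation".
sort_indices = np.argsort(fitness)

fitness = fitness[sort_indices]
weights = weights[sort_indices]
activations = [activations[i] for i in sort_indices]

assert activations[-1] == ['tanh']  # the genome with fitness 0.9 now sits last
```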
pyerualjetwork/visualizations_cuda.py
CHANGED
@@ -358,7 +358,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
     fpr, tpr, thresholds = roc_curve(y_true, y_preds)
 
     roc_auc = cp.trapz(tpr, fpr)
-    axs[1, 0].plot(fpr
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
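The repaired plot call above is the standard ROC recipe: plot TPR against FPR, overlay the chance diagonal, and report the trapezoidal AUC. A CPU-side sketch of the same steps with NumPy, matplotlib, and scikit-learn's roc_curve (made-up scores; the CUDA file computes the area with cp.trapz instead of np.trapz):

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = np.trapz(tpr, fpr)  # trapezoidal AUC, as in the diff

fig, ax = plt.subplots()
ax.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')  # chance line
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.legend(loc='lower right')
plt.show()
```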
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.8
+Version: 4.1.8b0
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=WB6frZ8e1IWyGHYt3RE7bsg4yn7Ozrudnf6rk60_H3o,2177
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/plan.py,sha256=jE3xQUf2h8Vi0GshBktpn4Tyn3IiyJOo2GMZOTPsJQA,35359
+pyerualjetwork/plan_cuda.py,sha256=xdD6CFYiyta93L8VWA9NGXcXjzFzvNYr29XSZ-9wZiQ,35996
+pyerualjetwork/planeat.py,sha256=pVp8ndi5E_muwOTFmlcav70-5LLV5A2yA0_SgURvT08,40236
+pyerualjetwork/planeat_cuda.py,sha256=3Vt5_zHUK4Jt_vW6LugQOy3to8gzQfT0_poPxeJTy68,40253
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
-pyerualjetwork/visualizations_cuda.py,sha256=
-pyerualjetwork-4.1.
-pyerualjetwork-4.1.
-pyerualjetwork-4.1.
-pyerualjetwork-4.1.
+pyerualjetwork/visualizations_cuda.py,sha256=9qw46Y4bo67l0nVVF1FSNS8ksyzbIAJdaPDFOhN5J8Y,29188
+pyerualjetwork-4.1.8b0.dist-info/METADATA,sha256=ZW6TS4CJncDtHKSJSuOhiFgyy3vBZVqeExiVXcmcgAU,7795
+pyerualjetwork-4.1.8b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.8b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.8b0.dist-info/RECORD,,
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/WHEEL
File without changes
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b0.dist-info}/top_level.txt
File without changes