pyerualjetwork 4.1.8__py3-none-any.whl → 4.1.8b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/plan.py +61 -29
- pyerualjetwork/plan_cuda.py +39 -30
- pyerualjetwork/planeat.py +1 -1
- pyerualjetwork/planeat_cuda.py +9 -7
- pyerualjetwork/visualizations_cuda.py +1 -1
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/METADATA +1 -1
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/RECORD +10 -10
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/top_level.txt +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -48,7 +48,7 @@ for package_name in package_names:
 
 print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.8"
+__version__ = "4.1.8b1"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
pyerualjetwork/plan.py
CHANGED
@@ -24,7 +24,6 @@ from .loss_functions import binary_crossentropy, categorical_crossentropy
 from .activation_functions import apply_activation, Softmax, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
-from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -103,6 +102,8 @@ def fit(
     Returns:
         numpyarray([num]): (Weight matrix).
     """
+
+    from model_operations import get_acc
 
     # Pre-checks
 
@@ -172,7 +173,7 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
@@ -191,9 +192,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     y_train (array-like): Labels for training data. one-hot encoded.
 
-    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizator = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
+    genetic_optimizator = lambda *args, **kwargs: planeat.evolve(*args,
             activation_add_prob=0.85,
             mutations=False,
             strategy='cross_over',
@@ -201,7 +202,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     model = plan.learner(x_train,
                          y_train,
-                         optimizer=genetic_optimizer,
+                         optimizator=genetic_optimizator,
                          strategy='accuracy',
                          show_history=True,
                          target_acc=0.94,
@@ -255,37 +256,52 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     """
 
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
 
     # Pre-checks
 
     x_train = x_train.astype(dtype, copy=False)
-
+
+    if len(y_train[0]) < 256:
+        if y_train.dtype != np.uint8:
+            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
+    elif len(y_train[0]) <= 32767:
+        if y_train.dtype != np.uint16:
+            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
+    else:
+        if y_train.dtype != np.uint32:
+            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
+
+    if x_test is not None:
+        x_test = x_test.astype(dtype, copy=False)
+
+        if len(y_test[0]) < 256:
+            if y_test.dtype != np.uint8:
+                y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
+        elif len(y_test[0]) <= 32767:
+            if y_test.dtype != np.uint16:
+                y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
+        else:
+            if y_test.dtype != np.uint32:
+                y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
 
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
-        x_test = x_test.astype(dtype, copy=False)
-        y_test = optimize_labels(y_test, cuda=False)
         data = 'Test'
 
     # Filter activation functions
-    if only_this
+    if only_this != None:
        activation_potentiation = only_this
-    if except_this
+    if except_this != None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
    if gen is None:
        gen = len(activation_potentiation)
 
-    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
-
-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
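The casting block added above selects the smallest unsigned integer dtype that can represent the one-hot label width, cutting memory for large label matrices. A minimal standalone sketch of the same rule (the `downcast_labels` helper is our name, not a package API; the 32767 bound mirrors the package code even though uint16 can hold values up to 65535):

```python
import numpy as np

def downcast_labels(y):
    # Choose the smallest unsigned int dtype based on the one-hot width,
    # following the thresholds used in the diff above.
    num_classes = len(y[0])
    if num_classes < 256:
        target = np.uint8
    elif num_classes <= 32767:
        target = np.uint16
    else:
        target = np.uint32
    return np.asarray(y).astype(target, copy=False)

y = np.eye(300)                      # 300-class one-hot labels
print(downcast_labels(y).dtype)      # uint16
```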
@@ -320,8 +336,32 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
+    weight_pop = []
     target_pop = []
 
+    for i in range(len(activation_potentiation)):
+        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
+
+        if i == 0 and start_this_act is not None:
+            act_pop[0] = activation_potentiation[i]
+            weight_pop[0] = W
+        else:
+            act_pop.append(activation_potentiation[i])
+            weight_pop.append(W)
+
+        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype)
+
+        if strategy == 'accuracy': target_pop.append(model[get_acc()])
+        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+
+            if strategy == 'precision': target_pop.append(precision_score)
+            if strategy == 'recall': target_pop.append(recall_score)
+            if strategy == 'f1': target_pop.append(f1_score)
+
+    weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, 0, np.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
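The new Pre-Run pass builds the starting population before the generation loop: one genome per activation function, each trained once and scored with the chosen strategy. A runnable toy sketch of that seeding pattern, where `train_once` and `score` are stand-ins for `plan.fit` and `plan.evaluate`:

```python
import numpy as np

def train_once(activation, rng):
    # Stand-in for plan.fit: returns a fake weight matrix per activation.
    return rng.standard_normal((4, 3))

def score(W):
    # Stand-in for the accuracy/f1/precision/recall metric.
    return float(np.abs(W).mean())

rng = np.random.default_rng(0)
candidates = ['linear', 'tanh', 'relu', 'sigmoid']
weight_pop, act_pop, target_pop = [], [], []

for act in candidates:
    W = train_once(act, rng)
    act_pop.append(act)
    weight_pop.append(W)
    target_pop.append(score(W))      # one fitness value per genome

# weight_pop / act_pop / target_pop now seed the generation-0 evolve call.
print(round(max(target_pop), 3))
```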
@@ -331,19 +371,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         progress.n = 0
         progress.last_print_n = 0
         progress.update(0)
+
+        weight_pop, act_pop = optimizator(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
 
         for j in range(len(activation_potentiation)):
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-            if i == 0:
-                act_pop.append(activation_potentiation[j])
-                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                weight_pop.append(W)
-
             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
+
             acc = model[get_acc()]
-
             if strategy == 'accuracy': target_pop.append(acc)
 
             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
@@ -369,7 +406,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
             if ((strategy == 'accuracy' and acc >= best_acc) or
                 (strategy == 'f1' and f1_score >= best_f1) or
-                (strategy == 'precision' and precision_score >= best_precision) or
+                (strategy == 'precision' and precision_score >= best_precision) or
                 (strategy == 'recall' and recall_score >= best_recall)):
 
                 best_acc = acc
@@ -377,8 +414,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 final_activations = act_pop[j]
                 best_model = model
 
-                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
-
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
@@ -477,9 +512,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
 
-        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
-        target_pop = []
-
         # Early stopping check
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
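Taken together, the plan.py changes rename the optimization callback to `optimizator` and move population seeding into the Pre-Run pass. A sketch of the resulting call shape, adapted from the updated docstring (the data arrays and hyperparameter values are illustrative only):

```python
import numpy as np
from pyerualjetwork import plan, planeat

x_train = np.random.rand(20, 4).astype(np.float32)
y_train = np.eye(2, dtype=np.uint8)[np.random.randint(0, 2, 20)]  # one-hot labels

# Wrap planeat.evolve with fixed hyperparameters, as the docstring recommends.
genetic_optimizator = lambda *args, **kwargs: planeat.evolve(*args,
                                                             activation_add_prob=0.85,
                                                             mutations=False,
                                                             strategy='cross_over',
                                                             **kwargs)

model = plan.learner(x_train,
                     y_train,
                     optimizator=genetic_optimizator,
                     strategy='accuracy',
                     gen=2)
```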
pyerualjetwork/plan_cuda.py
CHANGED
@@ -14,7 +14,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 """
 
 import cupy as cp
-import numpy as np
 from colorama import Fore
 import math
 
@@ -25,7 +24,7 @@ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
 from .activation_functions_cuda import apply_activation, Softmax, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
-from .memory_operations import transfer_to_gpu, transfer_to_cpu
+from .memory_operations import transfer_to_gpu, transfer_to_cpu
 from .visualizations_cuda import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -108,6 +107,8 @@ def fit(
         numpyarray([num]): (Weight matrix).
     """
     # Pre-checks
+
+    from memory_operations import transfer_to_gpu, transfer_to_cpu
 
     if train_bar and val:
         train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
@@ -188,7 +189,7 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
+def learner(x_train, y_train, optimizator, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
@@ -207,9 +208,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     y_train (array-like): Labels for training data.
 
-    optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+    optimizator (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
     ```python
-    genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+    genetic_optimizator = lambda *args, **kwargs: planeat_cuda.evolve(*args,
             activation_add_prob=0.85,
             mutations=False,
             strategy='cross_over',
@@ -217,7 +218,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     model = plan_cuda.learner(x_train,
                               y_train,
-                              optimizer=genetic_optimizer,
+                              optimizator=genetic_optimizator,
                               strategy='accuracy',
                               show_history=True,
                               target_acc=0.94,
@@ -273,19 +274,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
     """
 
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
 
-    y_train = optimize_labels(y_train, cuda=True)
-
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
         data = 'Test'
-        y_test = optimize_labels(y_test, cuda=True)
 
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=dtype)
@@ -308,27 +306,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     else:
         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
-    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
-
     # Filter activation functions
-    if only_this
+    if only_this != None:
        activation_potentiation = only_this
-    if except_this
+    if except_this != None:
        activation_potentiation = [item for item in activation_potentiation if item not in except_this]
    if gen is None:
        gen = len(activation_potentiation)
 
-    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
    # Initialize visualization components
    viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
    # Initialize progress bar
    if batch_size == 1:
-        ncols =
+        ncols = 76
    else:
-        ncols =
+        ncols = 89
 
    # Initialize variables
    act_pop = []
@@ -341,6 +334,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
         weight_pop.append(start_this_W)
 
     x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+
     model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
 
     if loss == 'categorical_crossentropy':
@@ -356,6 +350,31 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
     loss_list = []
     target_pop = []
 
+    for i in range(len(activation_potentiation)):
+        print(f"\rPre-Run {i}/{len(activation_potentiation)}",end='')
+
+        if i == 0 and start_this_act is not None:
+            act_pop[0] = activation_potentiation[i]
+            weight_pop[0] = W
+        else:
+            act_pop.append(activation_potentiation[i])
+            weight_pop.append(W)
+
+        x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+        W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype, memory=memory)
+        model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=act_pop[-1], dtype=dtype, memory=memory)
+
+        if strategy == 'accuracy': target_pop.append(model[get_acc()])
+        elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+            precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+
+            if strategy == 'precision': target_pop.append(precision_score)
+            if strategy == 'recall': target_pop.append(recall_score)
+            if strategy == 'f1': target_pop.append(f1_score)
+
+
+
+    weight_pop, act_pop = optimizator(cp.array(weight_pop, dtype=dtype), act_pop, 0, cp.array(target_pop, dtype=dtype), target_fitness=target_fitness, bar_status=False)
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     for i in range(gen):
|
@@ -369,12 +388,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
|
|
369
388
|
for j in range(len(activation_potentiation)):
|
370
389
|
|
371
390
|
x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
|
372
|
-
|
373
|
-
if i == 0:
|
374
|
-
act_pop.append(activation_potentiation[j])
|
375
|
-
W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
|
376
|
-
weight_pop.append(W)
|
377
|
-
|
378
391
|
model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
|
379
392
|
|
380
393
|
acc = model[get_acc()]
|
@@ -411,8 +424,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                 final_activations = act_pop[j]
                 best_model = model
 
-                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
-
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
@@ -510,9 +521,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
 
         best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
-
-        weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
-        target_pop = []
 
         # Early stopping check
         if early_stop == True and i > 0:
@@ -642,6 +650,7 @@ def evaluate(
     Returns:
         tuple: Model (list).
     """
+    from memory_operations import transfer_to_cpu, transfer_to_gpu
 
     if memory == 'gpu':
         x_test = transfer_to_gpu(x_test, dtype=dtype)
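plan_cuda.learner keeps the memory='cpu'/'gpu' switch and the transfer_to_gpu helper visible in the context lines above. A hedged sketch of that dispatch, with `to_device` standing in for the package's memory_operations helpers:

```python
import numpy as np

def to_device(x, memory, dtype):
    # Hypothetical stand-in for transfer_to_gpu / transfer_to_cpu: route the
    # array to the GPU (CuPy) or keep it on the host (NumPy) based on the flag.
    if memory == 'gpu':
        import cupy as cp
        return cp.asarray(x, dtype=dtype)
    elif memory == 'cpu':
        return np.asarray(x, dtype=dtype)
    raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

x = to_device(np.random.rand(8, 3), 'cpu', np.float32)
print(type(x).__module__, x.dtype)   # numpy float32
```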
pyerualjetwork/planeat.py
CHANGED
@@ -185,7 +185,7 @@ Notes:
 
 Example:
     ```python
-    weights, activation_potentiations =
+    weights, activation_potentiations = learner(weights, activation_potentiations, 1, fitness, info=True, strategy='cross_over', policy='normal_selective')
     ```
 
 - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
pyerualjetwork/planeat_cuda.py
CHANGED
@@ -186,7 +186,7 @@ Notes:
 
 Example:
     ```python
-    weights, activation_potentiations =
+    weights, activation_potentiations = learner(weights, activation_potentiations, 1, fitness, info=True, strategy='cross_over', policy='normal_selective')
    ```
 
 - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
@@ -230,15 +230,17 @@ Example:
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")
 
+    sort_indices = cp.argsort(fitness)
+
     ### FITNESS LIST IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
-    if target_fitness == 'max': sort_indices =
-    elif target_fitness == 'min': sort_indices =
+    if target_fitness == 'max': sort_indices = np.argsort(fitness)
+    elif target_fitness == 'min': sort_indices = np.argsort(-fitness)
 
     fitness = fitness[sort_indices]
     weights = weights[sort_indices]
 
-    activation_potentiations = [activation_potentiations[
+    activation_potentiations = [activation_potentiations[i] for i in sort_indices]
 
     ### GENOMES ARE DIVIDED INTO TWO GROUPS: GOOD GENOMES AND BAD GENOMES:
 
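The sorting rewrite applies a single argsort permutation to fitness, weights, and the activation list so all three stay aligned per genome, with the best genome ending up at the last index. A NumPy sketch of the pattern (standing in for the CuPy calls in the hunk above):

```python
import numpy as np

fitness = np.array([0.3, 0.9, 0.1])
weights = np.stack([np.full((2, 2), i) for i in range(3)])
activations = [['relu'], ['tanh'], ['linear']]

sort_indices = np.argsort(fitness)                 # ascending: worst -> best
fitness = fitness[sort_indices]
weights = weights[sort_indices]
activations = [activations[i] for i in sort_indices]

print(activations)   # [['linear'], ['relu'], ['tanh']] -- best genome last
```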
@@ -353,9 +355,9 @@ Example:
     print("  ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
     print("*** Performance ***")
-    print("
-    print("  MEAN REWARD: ", str(
-    print("  MIN REWARD: ", str(
+    print("  MAX REWARD: ", str(round(max(fitness), 2)))
+    print("  MEAN REWARD: ", str(round(cp.mean(fitness), 2)))
+    print("  MIN REWARD: ", str(round(min(fitness), 2)) + '\n')
 
     print("  BEST GENOME INDEX: ", str(len(weights)-1))
     print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
pyerualjetwork/visualizations_cuda.py
CHANGED
@@ -358,7 +358,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
     fpr, tpr, thresholds = roc_curve(y_true, y_preds)
 
     roc_auc = cp.trapz(tpr, fpr)
-    axs[1, 0].plot(fpr
+    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
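The restored line draws the ROC curve against the chance diagonal with the trapezoid-rule AUC in the legend. A self-contained matplotlib sketch of the same plot shape (toy fpr/tpr values in place of sklearn's roc_curve output):

```python
import numpy as np
import matplotlib.pyplot as plt

fpr = np.array([0.0, 0.1, 0.4, 1.0])   # toy false-positive rates
tpr = np.array([0.0, 0.6, 0.9, 1.0])   # toy true-positive rates
roc_auc = np.trapz(tpr, fpr)           # area under the curve

fig, ax = plt.subplots()
ax.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')   # chance diagonal
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.legend(loc='lower right')
plt.show()
```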
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.8
+Version: 4.1.8b1
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=
+pyerualjetwork/__init__.py,sha256=oroCPYREyLkdAV9z-6EW8beNYhimUdntMjWc1Uhd0mM,2177
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=
-pyerualjetwork/plan_cuda.py,sha256=
-pyerualjetwork/planeat.py,sha256=
-pyerualjetwork/planeat_cuda.py,sha256=
+pyerualjetwork/plan.py,sha256=atpdeF1wDu4HIngP9RN6iUBLKOzkmrFwHyCJQKUj_38,35419
+pyerualjetwork/plan_cuda.py,sha256=sl2Od8-STwAr6GQmwisryU-IsOvWIGSHeFSwHuEa1iM,36035
+pyerualjetwork/planeat.py,sha256=pVp8ndi5E_muwOTFmlcav70-5LLV5A2yA0_SgURvT08,40236
+pyerualjetwork/planeat_cuda.py,sha256=3Vt5_zHUK4Jt_vW6LugQOy3to8gzQfT0_poPxeJTy68,40253
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
 pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
-pyerualjetwork/visualizations_cuda.py,sha256=
-pyerualjetwork-4.1.8.dist-info/METADATA,sha256=
-pyerualjetwork-4.1.8.dist-info/WHEEL,sha256=
-pyerualjetwork-4.1.8.dist-info/top_level.txt,sha256=
-pyerualjetwork-4.1.8.dist-info/RECORD,,
+pyerualjetwork/visualizations_cuda.py,sha256=9qw46Y4bo67l0nVVF1FSNS8ksyzbIAJdaPDFOhN5J8Y,29188
+pyerualjetwork-4.1.8b1.dist-info/METADATA,sha256=_mbbAWUIEQ5V5T_iqWlzkZX2IQyKtWdTETlMSvvPw2g,7795
+pyerualjetwork-4.1.8b1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.8b1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.8b1.dist-info/RECORD,,
{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/WHEEL
File without changes

{pyerualjetwork-4.1.8.dist-info → pyerualjetwork-4.1.8b1.dist-info}/top_level.txt
File without changes