pyerualjetwork 5.48.tar.gz → 5.48a0.tar.gz
This diff shows the content of publicly released package versions as they appear in one of the supported public registries. It is provided for informational purposes only.
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/PKG-INFO +1 -1
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/__init__.py +1 -1
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/activation_functions.py +14 -1
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/data_ops.py +5 -5
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/loss_functions.py +11 -1
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/data_ops.py +7 -7
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/nn.py +16 -5
- pyerualjetwork-5.48a0/pyerualjetwork/optimizers/__init__.py +43 -0
- pyerualjetwork-5.48a0/pyerualjetwork/optimizers/backprop.py +48 -0
- {pyerualjetwork-5.48/pyerualjetwork → pyerualjetwork-5.48a0/pyerualjetwork/optimizers}/ene.py +4 -4
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork.egg-info/PKG-INFO +1 -1
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork.egg-info/SOURCES.txt +4 -3
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/setup.py +1 -1
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/README.md +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/__init__.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/metrics.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/visualizations.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/__init__.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/activation_functions.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/loss_functions.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/metrics.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/visualizations.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/fitness_functions.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/help.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/issue_solver.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/memory_ops.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/model_ops.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/old_cpu_model_ops.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/old_cuda_model_ops.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/ui.py +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork.egg-info/top_level.txt +0 -0
- {pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/setup.cfg +0 -0
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.48
+Version: 5.48a0
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/__init__.py
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
 - Contact: tchasancan@gmail.com
 """
 
-__version__ = "5.48"
+__version__ = "5.48a0"
 __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
 * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
 * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/activation_functions.py
@@ -200,6 +200,18 @@ def sine_offset(x, beta=0.0):
     return np.sin(x + beta)
 
 
+def apply_activation_derivative(x, name):
+    if name == 'sigmoid':
+        s = apply_activation(x, 'sigmoid')
+        return s * (1 - s)
+    elif name == 'relu':
+        return (x > 0).astype(float)
+    elif name == 'tanh':
+        return 1 - np.tanh(x) ** 2
+    else:
+        raise ValueError(f"Unknown activation derivative: {name}")
+
+
 def apply_activation(Input, activation_list):
     """
     Applies activation functions for inputs
@@ -251,4 +263,5 @@ def apply_activation(Input, activation_list):
 
     except Exception as e:
         warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
-        return Input
+        return Input
+
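The hunks above add `apply_activation_derivative`, which only knows the `sigmoid`, `relu`, and `tanh` derivatives. As a sanity check of those formulas, here is a small standalone NumPy snippet that compares local re-implementations of the `tanh` and `relu` branches against central finite differences; it deliberately avoids importing pyerualjetwork, so the helper names below are local to the example:

```python
import numpy as np

# Local mirrors of the derivative formulas added in 5.48a0 (illustrative only).
def d_tanh(x):
    return 1 - np.tanh(x) ** 2

def d_relu(x):
    return (x > 0).astype(float)

x = np.linspace(-2.0, 2.0, 9)
eps = 1e-5

# Central finite differences of the underlying activations.
fd_tanh = (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps)
fd_relu = (np.maximum(x + eps, 0) - np.maximum(x - eps, 0)) / (2 * eps)

assert np.allclose(d_tanh(x), fd_tanh, atol=1e-6)
# relu's finite difference disagrees only at the kink at 0, so mask it out.
mask = np.abs(x) > 1e-3
assert np.allclose(d_relu(x)[mask], fd_relu[mask], atol=1e-6)
print("tanh/relu derivatives match finite differences")
```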
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/data_ops.py
@@ -436,12 +436,12 @@ def find_closest_factors(a):
         return i, j
 
 
-def batcher(x, y, batch_size=1):
+def batcher(x_test, y_test, batch_size=1):
 
     if batch_size == 1:
-        return
+        return x_test, y_test
 
-    y_labels = np.argmax(
+    y_labels = np.argmax(y_test, axis=1)
 
     sampled_x, sampled_y = [], []
 
@@ -453,7 +453,7 @@ def batcher(x, y, batch_size=1):
 
         sampled_indices = np.random.choice(class_indices, num_samples, replace=False)
 
-        sampled_x.append(
-        sampled_y.append(
+        sampled_x.append(x_test[sampled_indices])
+        sampled_y.append(y_test[sampled_indices])
 
     return np.concatenate(sampled_x), np.concatenate(sampled_y)
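In 5.48a0 the CPU `batcher` takes explicit `x_test`/`y_test` arguments, returns them untouched when `batch_size == 1`, and otherwise samples each class without replacement. A hedged usage sketch, assuming pyerualjetwork 5.48a0 is installed, that `y` is one-hot encoded, and that `batch_size` acts as a per-class sampling fraction (that is how the CUDA variant further below computes `num_samples`; the CPU computation itself falls outside this hunk):

```python
import numpy as np
from pyerualjetwork.cpu.data_ops import batcher  # new signature in 5.48a0

rng = np.random.default_rng(42)
x = rng.standard_normal((100, 8)).astype(np.float32)
labels = rng.integers(0, 4, size=100)
y = np.eye(4, dtype=np.float32)[labels]          # one-hot targets, as batcher expects

x_full, y_full = batcher(x, y, batch_size=1)     # batch_size=1 -> data returned as-is
x_half, y_half = batcher(x, y, batch_size=0.5)   # roughly half of each class, sampled without replacement

print(x_full.shape, x_half.shape, y_half.sum(axis=0))  # per-class counts of the subsample
```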
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cpu/loss_functions.py
@@ -18,4 +18,14 @@ def binary_crossentropy(y_true_batch, y_pred_batch):
     losses = -np.mean(y_true_batch * np.log(y_pred_batch) + (1 - y_true_batch) * np.log(1 - y_pred_batch), axis=1)
 
     mean_loss = np.mean(losses)
-    return mean_loss
+    return mean_loss
+
+def categorical_crossentropy_derivative(y_true, y_pred):
+    epsilon = 1e-7
+    y_pred = np.clip(y_pred, epsilon, 1. - epsilon)
+    return - (y_true / y_pred)
+
+def binary_crossentropy_derivative(y_true, y_pred):
+    epsilon = 1e-7
+    y_pred = np.clip(y_pred, epsilon, 1. - epsilon)
+    return (y_pred - y_true) / (y_pred * (1 - y_pred))
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/cuda/data_ops.py
@@ -484,20 +484,20 @@ def find_closest_factors(a):
         j = a // i
         return i, j
 
-def batcher(x, y, batch_size=1):
+def batcher(x_test, y_test, batch_size=1):
 
     if batch_size == 1:
-        return
+        return x_test, y_test
 
-    y_labels = cp.argmax(
+    y_labels = cp.argmax(y_test, axis=1)
 
     unique_labels = cp.unique(y_labels)
     total_samples = sum(
         int(cp.sum(y_labels == class_label) * batch_size) for class_label in unique_labels
     )
 
-    sampled_x = cp.empty((total_samples,
-    sampled_y = cp.empty((total_samples,
+    sampled_x = cp.empty((total_samples, x_test.shape[1]), dtype=x_test.dtype)
+    sampled_y = cp.empty((total_samples, y_test.shape[1]), dtype=y_test.dtype)
 
     offset = 0
     for class_label in unique_labels:
@@ -507,8 +507,8 @@ def batcher(x, y, batch_size=1):
 
         sampled_indices = cp.random.choice(class_indices, num_samples, replace=False)
 
-        sampled_x[offset:offset + num_samples] =
-        sampled_y[offset:offset + num_samples] =
+        sampled_x[offset:offset + num_samples] = x_test[sampled_indices]
+        sampled_y[offset:offset + num_samples] = y_test[sampled_indices]
 
         offset += num_samples
 
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork/nn.py
@@ -198,7 +198,8 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
         tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
     """
 
-    from .ene import define_genomes
+    from .optimizers.ene import define_genomes
+    from .optimizers.backprop import backprop_update_general
     from .cpu.visualizations import display_decision_boundary_history, create_decision_boundary_hist, plot_decision_boundary
 
    if cuda is False:
@@ -351,10 +352,13 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
         progress.last_print_n = 0
         progress.update(0)
 
+        x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+
+        if optimizer == 'backprop':
+            pop_size = 1
+
         for j in range(pop_size):
 
-            x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
-
             if fit_start is True and i == 0:
                 if start_this_act is not None and j == 0:
                     pass
@@ -369,6 +373,12 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
                     weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)
 
 
+            if optimizer == 'backprop':
+                weight_pop[0], current_loss = backprop_update_general(
+                    x_train_batch, y_train_batch, weight_pop[0], act_pop[0],
+                    learning_rate=0.01
+                )
+
             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
             acc = model[get_acc()]
 
@@ -529,8 +539,9 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
         if model_type == 'PLAN': weight_pop = np.array(weight_pop, copy=False, dtype=dtype)
         else: weight_pop = np.array(weight_pop, copy=False, dtype=object)
 
-
-
+        if optimizer != 'backprop':
+            weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, is_mlp=is_mlp, bar_status=False)
+            target_pop = []
 
         # Early stopping check
         if early_stop == True and i > 0:
pyerualjetwork-5.48a0/pyerualjetwork/optimizers/__init__.py
@@ -0,0 +1,43 @@
+"""
+
+Optimizers
+==============
+PyereualJetwork is a large wide GPU-accelerated machine learning library in Python designed for professionals and researchers.
+It features PLAN, MLP Deep Learning and PTNN training, as well as ENE (Eugenic NeuroEvolution) for genetic optimization,
+which can also be applied to genetic algorithms or Reinforcement Learning (RL) problems.
+The library includes functions for data pre-processing, visualizations, model saving and loading, prediction and evaluation,
+training, and both detailed and simplified memory management.
+
+
+PyerualJetwork Main Modules:
+----------------------------
+- nn
+- ene
+- model_ops
+
+CPU Main Modules:
+---------------------------
+- cpu.data_ops
+
+GPU Main Modules:
+---------------------------
+- cuda.data_ops
+
+Memory Module:
+--------------
+- memory_ops
+
+Issue Solver Module:
+--------------
+- issue_solver
+
+Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+- Creator: Hasan Can Beydili
+- YouTube: https://www.youtube.com/@HasanCanBeydili
+- Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+- Instagram: https://www.instagram.com/canbeydilj
+- Contact: tchasancan@gmail.com
+"""
pyerualjetwork-5.48a0/pyerualjetwork/optimizers/backprop.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+from ..cpu.loss_functions import categorical_crossentropy, categorical_crossentropy_derivative
+from ..cpu.activation_functions import apply_activation, apply_activation_derivative
+
+def backprop_update_general(
+    X, y, weights, activations_list,
+    learning_rate=0.01
+):
+    """
+    X: (batch_size, input_dim)
+    y: (batch_size, output_dim)
+    weights: numpy object array, each element a weight matrix of shape (in_dim, out_dim)
+    activations_list: list of activation names, one per layer (e.g. ['relu', 'sigmoid', 'softmax'])
+    apply_activation: takes x and an activation name, returns the output
+    apply_activation_derivative: takes x and an activation name, returns the derivative
+    loss_function: takes y_true and y_pred, returns the loss
+    loss_derivative: takes y_true and y_pred, returns d_loss/d_y_pred
+    learning_rate: learning rate
+    """
+    num_layers = len(weights) + 1
+    activations = [X]
+    inputs = []
+
+    # Forward pass
+    for i, w in enumerate(weights):
+        inp = np.dot(activations[-1], w)
+        out = apply_activation(inp, activations_list[i])
+        inputs.append(inp)
+        activations.append(out)
+
+    y_pred = activations[-1]
+    loss = categorical_crossentropy(y, y_pred)
+
+    # Calculate output error (using provided derivative)
+    error = categorical_crossentropy_derivative(y, y_pred)
+    deltas = [error * apply_activation_derivative(inputs[-1], activations_list[-1])]
+
+    # Backpropagate
+    for i in reversed(range(len(weights) - 1)):
+        delta = np.dot(deltas[0], weights[i + 1].T) * apply_activation_derivative(inputs[i], activations_list[i])
+        deltas.insert(0, delta)
+
+    # Update weights
+    for i in range(len(weights)):
+        weights[i] += learning_rate * np.dot(activations[i].T, deltas[i])
+
+    return weights, loss
{pyerualjetwork-5.48/pyerualjetwork → pyerualjetwork-5.48a0/pyerualjetwork/optimizers}/ene.py
RENAMED
@@ -1,4 +1,4 @@
-"""
+"""
 
 
 ENE (Eugenic NeuroEvolution)
@@ -33,9 +33,9 @@ import math
 import copy
 
 ### LIBRARY IMPORTS ###
-from
-from
-from
+from ..cpu.data_ops import non_neg_normalization
+from ..ui import loading_bars, initialize_loading_bar
+from ..cpu.activation_functions import all_activations
 
 def define_genomes(input_shape, output_shape, population_size, neurons=[], activation_functions=[], dtype=np.float32):
     """
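For downstream code, the practical consequence of the rename is the import path: `ene` now lives in the `pyerualjetwork.optimizers` subpackage, alongside the new `backprop` module (nn.py's own imports above reflect the same move). A minimal sketch, assuming pyerualjetwork 5.48a0 is installed:

```python
# Old location in 5.48:
#   from pyerualjetwork.ene import define_genomes
# New locations in 5.48a0:
from pyerualjetwork.optimizers.ene import define_genomes
from pyerualjetwork.optimizers.backprop import backprop_update_general

print(define_genomes.__module__, backprop_update_general.__module__)
```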
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 5.48
+Version: 5.48a0
 Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
{pyerualjetwork-5.48 → pyerualjetwork-5.48a0}/pyerualjetwork.egg-info/SOURCES.txt
@@ -1,8 +1,6 @@
 README.md
-setup.cfg
 setup.py
 pyerualjetwork/__init__.py
-pyerualjetwork/ene.py
 pyerualjetwork/fitness_functions.py
 pyerualjetwork/help.py
 pyerualjetwork/issue_solver.py
@@ -27,4 +25,7 @@ pyerualjetwork/cuda/activation_functions.py
 pyerualjetwork/cuda/data_ops.py
 pyerualjetwork/cuda/loss_functions.py
 pyerualjetwork/cuda/metrics.py
-pyerualjetwork/cuda/visualizations.py
+pyerualjetwork/cuda/visualizations.py
+pyerualjetwork/optimizers/__init__.py
+pyerualjetwork/optimizers/backprop.py
+pyerualjetwork/optimizers/ene.py