pyerualjetwork 5.48a0__py3-none-any.whl → 5.49__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -42,7 +42,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  - Contact: tchasancan@gmail.com
  """

- __version__ = "5.48a0"
+ __version__ = "5.49"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
@@ -200,18 +200,6 @@ def sine_offset(x, beta=0.0):
      return np.sin(x + beta)


- def apply_activation_derivative(x, name):
-     if name == 'sigmoid':
-         s = apply_activation(x, 'sigmoid')
-         return s * (1 - s)
-     elif name == 'relu':
-         return (x > 0).astype(float)
-     elif name == 'tanh':
-         return 1 - np.tanh(x) ** 2
-     else:
-         raise ValueError(f"Unknown activation derivative: {name}")
-
-
  def apply_activation(Input, activation_list):
      """
      Applies activation functions for inputs
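Note: the deleted apply_activation_derivative helper existed only to serve the experimental backprop optimizer, which is also removed in this release (see the deleted optimizers/backprop.py at the end of this diff). Downstream code that imported it can keep a local copy; a minimal self-contained sketch of the deleted body, with sigmoid inlined so it no longer depends on apply_activation:

    import numpy as np

    def apply_activation_derivative(x, name):
        # mirrors the removed 5.48a0 body
        if name == 'sigmoid':
            s = 1.0 / (1.0 + np.exp(-x))
            return s * (1 - s)
        elif name == 'relu':
            return (x > 0).astype(float)
        elif name == 'tanh':
            return 1 - np.tanh(x) ** 2
        else:
            raise ValueError(f"Unknown activation derivative: {name}")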
@@ -263,5 +251,4 @@ def apply_activation(Input, activation_list):

      except Exception as e:
          warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
-         return Input
-
+         return Input
@@ -436,12 +436,12 @@ def find_closest_factors(a):
      return i, j


- def batcher(x_test, y_test, batch_size=1):
+ def batcher(x, y, batch_size=1):

      if batch_size == 1:
-         return x_test, y_test
+         return x, y

-     y_labels = np.argmax(y_test, axis=1)
+     y_labels = np.argmax(y, axis=1)

      sampled_x, sampled_y = [], []

@@ -453,7 +453,7 @@ def batcher(x_test, y_test, batch_size=1):

      sampled_indices = np.random.choice(class_indices, num_samples, replace=False)

-     sampled_x.append(x_test[sampled_indices])
-     sampled_y.append(y_test[sampled_indices])
+     sampled_x.append(x[sampled_indices])
+     sampled_y.append(y[sampled_indices])

      return np.concatenate(sampled_x), np.concatenate(sampled_y)
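Note: besides being a breaking rename for callers that passed x_test=/y_test= as keywords, batcher draws a stratified sample: batch_size acts as a per-class fraction (batch_size=1 returns the data unchanged), as the CUDA variant below makes explicit with int(cp.sum(y_labels == class_label) * batch_size); the CUDA version also preallocates its output with cp.empty instead of appending and concatenating. A hypothetical usage sketch:

    import numpy as np

    x = np.random.rand(100, 4).astype(np.float32)
    y = np.eye(2)[np.random.randint(0, 2, 100)]       # one-hot labels

    # draws roughly half of each class, without replacement
    x_batch, y_batch = batcher(x, y, batch_size=0.5)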
@@ -18,14 +18,4 @@ def binary_crossentropy(y_true_batch, y_pred_batch):
      losses = -np.mean(y_true_batch * np.log(y_pred_batch) + (1 - y_true_batch) * np.log(1 - y_pred_batch), axis=1)

      mean_loss = np.mean(losses)
-     return mean_loss
-
- def categorical_crossentropy_derivative(y_true, y_pred):
-     epsilon = 1e-7
-     y_pred = np.clip(y_pred, epsilon, 1. - epsilon)
-     return - (y_true / y_pred)
-
- def binary_crossentropy_derivative(y_true, y_pred):
-     epsilon = 1e-7
-     y_pred = np.clip(y_pred, epsilon, 1. - epsilon)
-     return (y_pred - y_true) / (y_pred * (1 - y_pred))
+     return mean_loss
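Note: like apply_activation_derivative above, both crossentropy derivative helpers existed only to serve the removed backprop optimizer. For reference, the binary form implemented d/dy_pred of -[y*log(y_pred) + (1 - y)*log(1 - y_pred)] = (y_pred - y) / (y_pred * (1 - y_pred)), with epsilon clipping to avoid division by zero; code that still needs these helpers can copy the deleted bodies verbatim.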
@@ -484,20 +484,20 @@ def find_closest_factors(a):
      j = a // i
      return i, j

- def batcher(x_test, y_test, batch_size=1):
+ def batcher(x, y, batch_size=1):

      if batch_size == 1:
-         return x_test, y_test
+         return x, y

-     y_labels = cp.argmax(y_test, axis=1)
+     y_labels = cp.argmax(y, axis=1)

      unique_labels = cp.unique(y_labels)
      total_samples = sum(
          int(cp.sum(y_labels == class_label) * batch_size) for class_label in unique_labels
      )

-     sampled_x = cp.empty((total_samples, x_test.shape[1]), dtype=x_test.dtype)
-     sampled_y = cp.empty((total_samples, y_test.shape[1]), dtype=y_test.dtype)
+     sampled_x = cp.empty((total_samples, x.shape[1]), dtype=x.dtype)
+     sampled_y = cp.empty((total_samples, y.shape[1]), dtype=y.dtype)

      offset = 0
      for class_label in unique_labels:

@@ -507,8 +507,8 @@ def batcher(x_test, y_test, batch_size=1):

      sampled_indices = cp.random.choice(class_indices, num_samples, replace=False)

-     sampled_x[offset:offset + num_samples] = x_test[sampled_indices]
-     sampled_y[offset:offset + num_samples] = y_test[sampled_indices]
+     sampled_x[offset:offset + num_samples] = x[sampled_indices]
+     sampled_y[offset:offset + num_samples] = y[sampled_indices]

      offset += num_samples
@@ -1,4 +1,4 @@
- """
+ """


  ENE (Eugenic NeuroEvolution)
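Note: the removed and added lines of this hunk render identically; this is almost certainly an invisible encoding-level change, such as a UTF-8 BOM or stray whitespace being stripped from the module docstring.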
@@ -33,9 +33,9 @@ import math
  import copy

  ### LIBRARY IMPORTS ###
- from ..cpu.data_ops import non_neg_normalization
- from ..ui import loading_bars, initialize_loading_bar
- from ..cpu.activation_functions import all_activations
+ from .cpu.data_ops import non_neg_normalization
+ from .ui import loading_bars, initialize_loading_bar
+ from .cpu.activation_functions import all_activations

  def define_genomes(input_shape, output_shape, population_size, neurons=[], activation_functions=[], dtype=np.float32):
      """
@@ -122,7 +122,7 @@ def evolver(weights,
              policy='aggressive',
              bad_genomes_selection_prob=None,
              bar_status=True,
-             strategy='normal_selective',
+             strategy='more_selective',
              bad_genomes_mutation_prob=None,
              fitness_bias=1,
              cross_over_mode='tpm',
@@ -137,7 +137,7 @@ def evolver(weights,
              weight_mutate_threshold=16,
              weight_mutate_prob=1,
              is_mlp=False,
-             save_best_genome=False,
+             save_best_genome=True,
              dtype=np.float32):
      """
      Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -167,7 +167,7 @@ def evolver(weights,
      - 'normal_selective': Normal selection based on fitness, where a portion of the bad genes are discarded.
      - 'more_selective': A more selective strategy, where fewer bad genes survive.
      - 'less_selective': A less selective strategy, where more bad genes survive.
-     Default is 'normal_selective'.
+     Default is 'more_selective'.

      bar_status (bool, optional): Loading bar status during evolving process of genomes. True or False. Default: True
@@ -226,7 +226,7 @@ def evolver(weights,

      is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

-     save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. (index of best individual: 0) Default: False
+     save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. (index of best individual: 0) Default: True

      dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
      Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].
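Note: both default changes alter behavior for existing callers: selection now discards more weak genomes by default, and the best genome of the previous generation is carried over at index 0 (elitism). To reproduce the 5.48a0 behavior under 5.49, pass the old values explicitly; a sketch, with positional argument names taken from the call site in nn.py below and other keywords elided:

    weight_pop, act_pop = evolver(weight_pop, act_pop, generation, fitness,
                                  strategy='normal_selective',
                                  save_best_genome=False)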
@@ -499,11 +499,14 @@ def predict_from_storage(Input, model_name, cuda=False, model_path=''):
      activations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activations]

      if model_type == 'MLP':
+
          layer = Input
          for i in range(len(W)):
-             if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
-
-             layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+             if i == 0:
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+             else:
+                 layer = apply_activation(layer, activations[i])
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T

      result = layer
@@ -523,8 +526,7 @@ def predict_from_storage(Input, model_name, cuda=False, model_path=''):
      layer = Input @ cp.array(W[0]).T if cuda else Input @ W[0].T

      for i in range(1, len(W)):
-         if i != len(W) - 1: layer = apply_activation(layer, activations[i])
-
+         layer = apply_activation(layer, activations[i])
          layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T

      result = layer
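Note: the old MLP branch applied activations only when i != len(W) - 1 and i != 0, so the final hidden layer fed the output weights without its activation; 5.49 activates every layer except the initial input projection, and the second loop above gets the matching fix. The same correction is applied to predict_from_memory in the next hunks. A minimal CPU-only sketch of the corrected forward pass (apply_activation as in activation_functions above):

    def mlp_forward(Input, W, activations):
        layer = Input
        for i in range(len(W)):
            if i == 0:
                layer = layer @ W[i].T                    # input projection, no activation
            else:
                layer = apply_activation(layer, activations[i])
                layer = layer @ W[i].T
        return layer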
@@ -617,9 +619,11 @@ def predict_from_memory(Input, model, cuda=False):
      if model_type == 'MLP':
          layer = Input
          for i in range(len(W)):
-             if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activations[i])
-
-             layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+             if i == 0:
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T
+             else:
+                 layer = apply_activation(layer, activations[i])
+                 layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T

      result = layer
@@ -639,8 +643,7 @@ def predict_from_memory(Input, model, cuda=False):
      layer = Input @ cp.array(W[0]).T if cuda else Input @ W[0].T

      for i in range(1, len(W)):
-         if i != len(W) - 1: layer = apply_activation(layer, activations[i])
-
+         layer = apply_activation(layer, activations[i])
          layer = layer @ cp.array(W[i]).T if cuda else layer @ W[i].T

      result = layer
pyerualjetwork/nn.py CHANGED
@@ -198,8 +198,7 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
          tuple: A list for model parameters: [Weight matrix, Train Preds, Train Accuracy, [Activations functions]].
      """

-     from .optimizers.ene import define_genomes
-     from .optimizers.backprop import backprop_update_general
+     from .ene import define_genomes
      from .cpu.visualizations import display_decision_boundary_history, create_decision_boundary_hist, plot_decision_boundary

      if cuda is False:
@@ -352,13 +351,10 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
      progress.last_print_n = 0
      progress.update(0)

-     x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
-
-     if optimizer == 'backprop':
-         pop_size = 1
-
      for j in range(pop_size):

+         x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+
          if fit_start is True and i == 0:
              if start_this_act is not None and j == 0:
                  pass
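Note: moving the batcher call inside the population loop means each genome j is now evaluated on its own freshly drawn stratified batch rather than on one batch shared by the whole generation, at the cost of pop_size batching calls per generation.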
@@ -373,12 +369,6 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
          weight_pop[j] = plan_fit(x_train_batch, y_train_batch, activations=act_pop[j], cuda=cuda, auto_normalization=auto_normalization, dtype=dtype)


-         if optimizer == 'backprop':
-             weight_pop[0], current_loss = backprop_update_general(
-                 x_train_batch, y_train_batch, weight_pop[0], act_pop[0],
-                 learning_rate=0.01
-             )
-
          model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activations=act_pop[j], activation_potentiations=activation_potentiations[j], auto_normalization=auto_normalization, cuda=cuda, model_type=model_type)
          acc = model[get_acc()]
@@ -539,9 +529,8 @@ def learn(x_train, y_train, optimizer, template_model, gen, pop_size, fit_start=
      if model_type == 'PLAN': weight_pop = np.array(weight_pop, copy=False, dtype=dtype)
      else: weight_pop = np.array(weight_pop, copy=False, dtype=object)

-     if optimizer != 'backprop':
-         weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, is_mlp=is_mlp, bar_status=False)
-         target_pop = []
+     weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, is_mlp=is_mlp, bar_status=False)
+     target_pop = []

      # Early stopping check
      if early_stop == True and i > 0:
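Note: with the 'backprop' string special case gone, optimizer is again always treated as a callable that evolves the population; judging by the call site above, in practice a wrapper around ene.evolver. A hypothetical wrapper matching that signature:

    from pyerualjetwork.ene import evolver

    # extra keywords (weight_evolve, is_mlp, bar_status) are forwarded to evolver
    def optimizer(weights, activations, generation, fitness, **kwargs):
        return evolver(weights, activations, generation, fitness, **kwargs)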
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 5.48a0
+ Version: 5.49
  Summary: PyereualJetwork is a GPU-accelerated machine learning library in Python for professionals and researchers. It features PLAN, MLP, Deep Learning training, and ENE (Eugenic NeuroEvolution) for genetic optimization, applicable to genetic algorithms or Reinforcement Learning (RL). The library includes data pre-processing, visualizations, model saving/loading, prediction, evaluation, training, and detailed or simplified memory management.
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -1,29 +1,27 @@
- pyerualjetwork/__init__.py,sha256=80ynIu2ljiuU0i-1VfkQ9ZK7LstUGJFRTm_KFWgiKeU,3022
+ pyerualjetwork/__init__.py,sha256=mFxYGSQWvUNRiGLa2dCpWcHMskjuUbPZcpgjMKxMq4A,3020
+ pyerualjetwork/ene.py,sha256=luTvspHRTose6s3uRas40pNXyKoxU9siaHiMBNI5yoc,42136
  pyerualjetwork/fitness_functions.py,sha256=D9JVCr9DFid_xXgBD4uCKxdW2k10MVDE5HZRSOK4Igg,1237
  pyerualjetwork/help.py,sha256=sn9jBzXkQsTZvdgsUXUpSs_BbYYIgY3whofg6dj8peI,848
  pyerualjetwork/issue_solver.py,sha256=uay_9XK6xWnLmK2P_BeyDQlyNXzg_zYffnXYd228wZk,4102
  pyerualjetwork/memory_ops.py,sha256=TUFh9SYWCKL6N-vNdWId_EwU313TuZomQCHOrltrD-4,14280
- pyerualjetwork/model_ops.py,sha256=39eUKrj0VKYiEYWKcq1U8O0TV_QMrxkuy8IhCHQsEcw,25101
- pyerualjetwork/nn.py,sha256=Ag8HEaJxsFdEcormqRgYIkdth7yI20IeqQHmKliiso8,37087
+ pyerualjetwork/model_ops.py,sha256=reky09eiECdhuiaWQwz4iMtIxPxKHBNPETGYlNGe2U8,25287
+ pyerualjetwork/nn.py,sha256=t1Jf99F6PqfEfCH6erPcwN6q-tF3DPYgHUlQ7OMtnv8,36656
  pyerualjetwork/old_cpu_model_ops.py,sha256=1KNgjUeYCO_TsA5RtbNiuIiBJzq8-rL2dE6jxKqCBU0,21481
  pyerualjetwork/old_cuda_model_ops.py,sha256=KAscAd8e_I8Vqdd9BJaHd6-IG6fhxFglAFxys0sqmEo,23079
  pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
  pyerualjetwork/cpu/__init__.py,sha256=9apRbPqvGKLJwyI3Md6R5a478YbZ7kIq0dRRa_lqgrY,789
- pyerualjetwork/cpu/activation_functions.py,sha256=NH0TDmAS9CRlviXAaC2xa4zUGU_WyZRWjCeWjXV5-N4,7054
- pyerualjetwork/cpu/data_ops.py,sha256=SPsIcjU0JPHfsnEmGjD8q-yTlpgYk-KPOPJ44dfp-nU,16143
- pyerualjetwork/cpu/loss_functions.py,sha256=w3eQREZBPS0cnHJ4637MlLF5xUsIxZTOtCQhicVH86s,969
+ pyerualjetwork/cpu/activation_functions.py,sha256=zZSoOQ452Ykp_RsHVxklxesJmmFgufyIB4F3WQjudEQ,6689
+ pyerualjetwork/cpu/data_ops.py,sha256=9fCUrBmAc2WJQ3WkWEkDNSJyPdkkKsYX4rwSEy2TSvc,16108
+ pyerualjetwork/cpu/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork/cpu/metrics.py,sha256=NF8FARAqtuGlf4omVkQT4pOQZy7uePqzuHZGX9Y_Pn4,6076
  pyerualjetwork/cpu/visualizations.py,sha256=RcEZXX-U3BStOna1-C_a7z2VpXHuLAigeg1pD4u8I9I,26923
  pyerualjetwork/cuda/__init__.py,sha256=Kja6OmNaJ0giOhRNYw9hgGkh5N4F1EUS2v94E_rmp2k,839
  pyerualjetwork/cuda/activation_functions.py,sha256=Gj-qalU0GoAWoZzbFFHsD-R0c0KzHwOK1wwUQneBE44,6872
- pyerualjetwork/cuda/data_ops.py,sha256=k7NX-ckZ6-NwvioigACUHrekG7L5lO4bzTtQbBwH1Fc,18508
+ pyerualjetwork/cuda/data_ops.py,sha256=BEXh4M7BWXaTpYlVS9D2i3CGgOmL5131vy7FZyuTQBA,18453
  pyerualjetwork/cuda/loss_functions.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
  pyerualjetwork/cuda/metrics.py,sha256=PjDBoRvr6va8vRvDIJJGBO4-I4uumrk3NCM1Vz4NJTo,5054
  pyerualjetwork/cuda/visualizations.py,sha256=2mHE7iqqsN3K6xtCnemS4o_YWGS0bIV2IxF4cG6Ur9k,20090
- pyerualjetwork/optimizers/__init__.py,sha256=vMHWczFD28ndwipUM4diF5-_9hV1sU4Q_Pi5q6iK9X8,1400
- pyerualjetwork/optimizers/backprop.py,sha256=FoUcV_ljMSRpUp4HIt0KhC4cRY-YYeYYPyFzdRu_Fdw,1868
- pyerualjetwork/optimizers/ene.py,sha256=JX8hXtd-TpsUca-YrRLbGiUl-Bn2bmBxrkQsX7q7KIo,42144
- pyerualjetwork-5.48a0.dist-info/METADATA,sha256=_xHtppTJUQ3zSnUz7pY3nAByTMo5RcnFVZugDFfe1d0,7990
- pyerualjetwork-5.48a0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-5.48a0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-5.48a0.dist-info/RECORD,,
+ pyerualjetwork-5.49.dist-info/METADATA,sha256=j27spk_SvvDITw89XDgCCPX3oc5SWyaGhDXdMJ4IsNA,7988
+ pyerualjetwork-5.49.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-5.49.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-5.49.dist-info/RECORD,,
@@ -1,43 +0,0 @@
- """
-
- Optimizers
- ==============
- PyereualJetwork is a large wide GPU-accelerated machine learning library in Python designed for professionals and researchers.
- It features PLAN, MLP Deep Learning and PTNN training, as well as ENE (Eugenic NeuroEvolution) for genetic optimization,
- which can also be applied to genetic algorithms or Reinforcement Learning (RL) problems.
- The library includes functions for data pre-processing, visualizations, model saving and loading, prediction and evaluation,
- training, and both detailed and simplified memory management.
-
-
- PyerualJetwork Main Modules:
- ----------------------------
- - nn
- - ene
- - model_ops
-
- CPU Main Modules:
- ---------------------------
- - cpu.data_ops
-
- GPU Main Modules:
- ---------------------------
- - cuda.data_ops
-
- Memory Module:
- --------------
- - memory_ops
-
- Issue Solver Module:
- --------------
- - issue_solver
-
- Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
-
- PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
-
- - Creator: Hasan Can Beydili
- - YouTube: https://www.youtube.com/@HasanCanBeydili
- - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
- - Instagram: https://www.instagram.com/canbeydilj
- - Contact: tchasancan@gmail.com
- """
@@ -1,48 +0,0 @@
- import numpy as np
-
- from ..cpu.loss_functions import categorical_crossentropy, categorical_crossentropy_derivative
- from ..cpu.activation_functions import apply_activation, apply_activation_derivative
-
- def backprop_update_general(
-     X, y, weights, activations_list,
-     learning_rate=0.01
- ):
-     """
-     X: (batch_size, input_dim)
-     y: (batch_size, output_dim)
-     weights: numpy object array, each element a weight matrix of shape (in_dim, out_dim)
-     activations_list: list of activation names, one per layer (e.g. ['relu', 'sigmoid', 'softmax'])
-     apply_activation: takes x and an activation name, returns the output
-     apply_activation_derivative: takes x and an activation name, returns the derivative
-     loss_function: takes y_true and y_pred, returns the loss
-     loss_derivative: takes y_true and y_pred, returns d_loss/d_y_pred
-     learning_rate: learning rate
-     """
-     num_layers = len(weights) + 1
-     activations = [X]
-     inputs = []
-
-     # Forward pass
-     for i, w in enumerate(weights):
-         inp = np.dot(activations[-1], w)
-         out = apply_activation(inp, activations_list[i])
-         inputs.append(inp)
-         activations.append(out)
-
-     y_pred = activations[-1]
-     loss = categorical_crossentropy(y, y_pred)
-
-     # Calculate output error (using provided derivative)
-     error = categorical_crossentropy_derivative(y, y_pred)
-     deltas = [error * apply_activation_derivative(inputs[-1], activations_list[-1])]
-
-     # Backpropagate
-     for i in reversed(range(len(weights) - 1)):
-         delta = np.dot(deltas[0], weights[i + 1].T) * apply_activation_derivative(inputs[i], activations_list[i])
-         deltas.insert(0, delta)
-
-     # Update weights
-     for i in range(len(weights)):
-         weights[i] += learning_rate * np.dot(activations[i].T, deltas[i])
-
-     return weights, loss
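Note on the deleted implementation: because the deltas are built from the raw loss derivative (categorical_crossentropy_derivative returns -y_true / y_pred), the update weights[i] += learning_rate * np.dot(...) steps up the gradient rather than down it, and num_layers is computed but never used, which may be why the experiment was dropped. The gradient-descent form of the update would subtract:

    # descent form of the deleted update step (note the sign)
    for i in range(len(weights)):
        weights[i] -= learning_rate * np.dot(activations[i].T, deltas[i])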