pyerualjetwork 4.3.3__tar.gz → 4.3.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/PKG-INFO +27 -25
  2. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/README.md +26 -24
  3. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/__init__.py +1 -1
  4. {pyerualjetwork-4.3.3/pyerualjetwork_afterburner → pyerualjetwork-4.3.4/pyerualjetwork}/activation_functions.py +9 -10
  5. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/plan.py +1 -1
  6. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/plan_cuda.py +5 -8
  7. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/planeat.py +1 -1
  8. {pyerualjetwork-4.3.3/pyerualjetwork_afterburner → pyerualjetwork-4.3.4/pyerualjetwork}/visualizations.py +2 -2
  9. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork.egg-info/PKG-INFO +27 -25
  10. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork.egg-info/SOURCES.txt +1 -0
  11. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/__init__.py +1 -1
  12. pyerualjetwork-4.3.4/pyerualjetwork_afterburner/activation_functions.py +290 -0
  13. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/activation_functions_cuda.py +5 -6
  14. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/data_operations_cuda.py +2 -2
  15. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/plan_cuda.py +7 -6
  16. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/planeat_cuda.py +2 -3
  17. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/setup.py +1 -1
  18. pyerualjetwork-4.3.3/pyerualjetwork/activation_functions.py +0 -343
  19. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/activation_functions_cuda.py +0 -0
  20. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/data_operations.py +0 -0
  21. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/data_operations_cuda.py +0 -0
  22. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/help.py +0 -0
  23. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/loss_functions.py +0 -0
  24. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/loss_functions_cuda.py +0 -0
  25. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/memory_operations.py +0 -0
  26. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/metrics.py +0 -0
  27. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/metrics_cuda.py +0 -0
  28. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/model_operations.py +0 -0
  29. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/model_operations_cuda.py +0 -0
  30. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/planeat_cuda.py +0 -0
  31. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/ui.py +0 -0
  32. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/visualizations_cuda.py +0 -0
  33. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
  34. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork.egg-info/top_level.txt +0 -0
  35. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/data_operations.py +0 -0
  36. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/help.py +0 -0
  37. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/loss_functions.py +0 -0
  38. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/loss_functions_cuda.py +0 -0
  39. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/memory_operations.py +0 -0
  40. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/metrics.py +0 -0
  41. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/metrics_cuda.py +0 -0
  42. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/model_operations.py +0 -0
  43. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/model_operations_cuda.py +0 -0
  44. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/plan.py +0 -0
  45. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/planeat.py +0 -0
  46. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/ui.py +0 -0
  47. {pyerualjetwork-4.3.3/pyerualjetwork → pyerualjetwork-4.3.4/pyerualjetwork_afterburner}/visualizations.py +0 -0
  48. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/visualizations_cuda.py +0 -0
  49. {pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/setup.cfg +0 -0
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.3
+ Version: 4.3.4
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -23,31 +23,33 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
  GitHub Page: https://github.com/HCB06/PyerualJetwork


- pip install pyerualjetwork
-
- 'use if your data small, medium or large:'
+ pip install pyerualjetwork

- from pyerualjetwork import plan
- from pyerualjetwork import planeat
- from pyerualjetwork import data_operations
- from pyerualjetwork import model_operations
-
- from pyerualjetwork import plan_cuda
- from pyerualjetwork import planeat_cuda
- from pyerualjetwork import data_operations_cuda
- from pyerualjetwork import model_operations_cuda
-
- 'use if your data huge: _afterburner package (afterburner package comes with powerful paralellism, afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded. Specially designed for LLM training and other massive model training)'
-
- from pyerualjetwork_afterburner import plan
- from pyerualjetwork_afterburner import planeat
- from pyerualjetwork_afterburner import data_operations
- from pyerualjetwork_afterburner import model_operations
-
- from pyerualjetwork_afterburner import plan_cuda
- from pyerualjetwork_afterburner import planeat_cuda
- from pyerualjetwork_afterburner import data_operations_cuda
- from pyerualjetwork_afterburner import model_operations_cuda
+ 'use this if your data small or memory management is a problem :'
+
+ from pyerualjetwork import plan
+ from pyerualjetwork import planeat
+ from pyerualjetwork import data_operations
+ from pyerualjetwork import model_operations
+
+ from pyerualjetwork import plan_cuda
+ from pyerualjetwork import planeat_cuda
+ from pyerualjetwork import data_operations_cuda
+ from pyerualjetwork import model_operations_cuda
+
+ 'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
+ afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
+ Specially designed for LLM training and other massive model training)'
+
+ from pyerualjetwork_afterburner import plan
+ from pyerualjetwork_afterburner import planeat
+ from pyerualjetwork_afterburner import data_operations
+ from pyerualjetwork_afterburner import model_operations
+
+ from pyerualjetwork_afterburner import plan_cuda
+ from pyerualjetwork_afterburner import planeat_cuda
+ from pyerualjetwork_afterburner import data_operations_cuda
+ from pyerualjetwork_afterburner import model_operations_cuda

  Optimized for Visual Studio Code

{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/README.md

@@ -14,31 +14,33 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
  GitHub Page: https://github.com/HCB06/PyerualJetwork


- pip install pyerualjetwork
-
- 'use if your data small, medium or large:'
+ pip install pyerualjetwork

- from pyerualjetwork import plan
- from pyerualjetwork import planeat
- from pyerualjetwork import data_operations
- from pyerualjetwork import model_operations
-
- from pyerualjetwork import plan_cuda
- from pyerualjetwork import planeat_cuda
- from pyerualjetwork import data_operations_cuda
- from pyerualjetwork import model_operations_cuda
-
- 'use if your data huge: _afterburner package (afterburner package comes with powerful paralellism, afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded. Specially designed for LLM training and other massive model training)'
-
- from pyerualjetwork_afterburner import plan
- from pyerualjetwork_afterburner import planeat
- from pyerualjetwork_afterburner import data_operations
- from pyerualjetwork_afterburner import model_operations
-
- from pyerualjetwork_afterburner import plan_cuda
- from pyerualjetwork_afterburner import planeat_cuda
- from pyerualjetwork_afterburner import data_operations_cuda
- from pyerualjetwork_afterburner import model_operations_cuda
+ 'use this if your data small or memory management is a problem :'
+
+ from pyerualjetwork import plan
+ from pyerualjetwork import planeat
+ from pyerualjetwork import data_operations
+ from pyerualjetwork import model_operations
+
+ from pyerualjetwork import plan_cuda
+ from pyerualjetwork import planeat_cuda
+ from pyerualjetwork import data_operations_cuda
+ from pyerualjetwork import model_operations_cuda
+
+ 'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
+ afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
+ Specially designed for LLM training and other massive model training)'
+
+ from pyerualjetwork_afterburner import plan
+ from pyerualjetwork_afterburner import planeat
+ from pyerualjetwork_afterburner import data_operations
+ from pyerualjetwork_afterburner import model_operations
+
+ from pyerualjetwork_afterburner import plan_cuda
+ from pyerualjetwork_afterburner import planeat_cuda
+ from pyerualjetwork_afterburner import data_operations_cuda
+ from pyerualjetwork_afterburner import model_operations_cuda

  Optimized for Visual Studio Code

{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/__init__.py

@@ -1,4 +1,4 @@
- __version__ = "4.3.3"
+ __version__ = "4.3.4"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
{pyerualjetwork-4.3.3/pyerualjetwork_afterburner → pyerualjetwork-4.3.4/pyerualjetwork}/activation_functions.py

@@ -217,8 +217,8 @@ def scaled_cubic(x, alpha=1.0):

  def sine_offset(x, beta=0.0):
  return np.sin(x + beta)
-
-
+
+
  def apply_activation(Input, activation_list):
  """
  Applies activation functions for inputs
@@ -277,15 +277,14 @@ def apply_activation(Input, activation_list):
  }

  try:
- valid_activations = [act for act in activation_list if act in activation_functions]
-
- activation_outputs = np.stack([activation_functions[act](origin_input)
- for act in valid_activations])

- result = Input + np.sum(activation_outputs, axis=0)
-
- return result
+ valid_mask = np.array([act in activation_functions for act in activation_list])
+ valid_activations = np.array(activation_list)[valid_mask]
+
+ activation_outputs = np.array([activation_functions[act](origin_input) for act in valid_activations])
+
+ return Input + np.sum(activation_outputs, axis=0)

  except Exception as e:
  warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
- return Input
+ return Input
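To make the refactor above concrete: apply_activation still returns the input plus the sum of each selected activation applied to the original input; only the bookkeeping changed (a boolean validity mask and a single stacked array instead of a list comprehension plus np.stack). A minimal NumPy illustration of that summation, using made-up sample values rather than anything from the package:

import numpy as np

# Illustration only; mirrors "Input + np.sum(activation_outputs, axis=0)" from the hunk above.
x = np.array([0.5, -1.0, 2.0], dtype=np.float32)
outputs = np.array([np.tanh(x), np.maximum(0, x)])  # as if activation_list = ['tanh', 'relu']
result = x + np.sum(outputs, axis=0)                 # x + tanh(x) + relu(x)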
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/plan.py

@@ -249,7 +249,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  """

- from .planeat import define_genomes
+ from planeat import define_genomes

  data = 'Train'

{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/plan_cuda.py

@@ -48,7 +48,6 @@ bar_format_normal = loading_bars()[0]
  bar_format_learner = loading_bars()[1]

  # BUILD -----
-
  def fit(
  x_train,
  y_train,
@@ -265,7 +264,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  """

- from .planeat_cuda import define_genomes
+ from planeat_cuda import define_genomes

  data = 'Train'

@@ -278,13 +277,13 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  x_train = transfer_to_gpu(x_train, dtype=dtype)
  y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

- from .data_operations_cuda import batcher
+ from data_operations_cuda import batcher

  elif memory == 'cpu':
  x_train = transfer_to_cpu(x_train, dtype=dtype)
  y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

- from .data_operations import batcher
+ from data_operations import batcher

  else:
  raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
@@ -336,13 +335,12 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  for j in range(activation_potentiation_len):

  x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
-
  if fit_start is True and i == 0:
  act_pop.append(activation_potentiation[j])
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+ W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], auto_normalization=auto_normalization, train_bar=False, dtype=dtype, memory=memory)
  weight_pop.append(W)

- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
+ model = evaluate(x_train_batch, y_train_batch, weight_pop[j], act_pop[j], loading_bar_status=False, dtype=dtype, memory=memory)
  acc = model[get_acc()]

  if strategy == 'accuracy': target_pop.append(acc)
@@ -562,7 +560,6 @@ def feed_forward(
  neural_layer = cp.dot(w, Input)

  return neural_layer
-

  def evaluate(
  x_test,
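For orientation, this is how the candidate-selection loop inside learner() reads after the plan_cuda.py changes above, assembled from the hunk itself (a fragment of the surrounding function, not a standalone script; act_pop, weight_pop, target_pop and the outer generation loop are assumed from the full source). The fit call now also forwards memory, and evaluate receives the weights and activations positionally.

# Post-change state of the loop, reassembled from the diff above (simplified context).
for j in range(activation_potentiation_len):
    x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
    if fit_start is True and i == 0:
        act_pop.append(activation_potentiation[j])
        W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1],
                auto_normalization=auto_normalization, train_bar=False,
                dtype=dtype, memory=memory)
        weight_pop.append(W)
    model = evaluate(x_train_batch, y_train_batch, weight_pop[j], act_pop[j],
                     loading_bar_status=False, dtype=dtype, memory=memory)
    acc = model[get_acc()]
    if strategy == 'accuracy':
        target_pop.append(acc)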
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork/planeat.py

@@ -20,7 +20,7 @@ import math
  from .plan import feed_forward
  from .data_operations import normalization
  from .ui import loading_bars, initialize_loading_bar
- from .activation_functions import apply_activation, all_activations
+ from. activation_functions import apply_activation, all_activations

  def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
  """
{pyerualjetwork-4.3.3/pyerualjetwork_afterburner → pyerualjetwork-4.3.4/pyerualjetwork}/visualizations.py

@@ -617,7 +617,7 @@ def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=F

  def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
  """Initializes the visualization setup based on the parameters."""
- from .data_operations import find_closest_factors
+ from data_operations import find_closest_factors
  visualization_objects = {}

  if show_training or neurons_history:
@@ -704,7 +704,7 @@ def show():

  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
  """Initialize all visualization components"""
- from .data_operations import find_closest_factors
+ from data_operations import find_closest_factors
  viz_objects = {}

  if show_history:
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.3
+ Version: 4.3.4
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -23,31 +23,33 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
  GitHub Page: https://github.com/HCB06/PyerualJetwork


- pip install pyerualjetwork
-
- 'use if your data small, medium or large:'
+ pip install pyerualjetwork

- from pyerualjetwork import plan
- from pyerualjetwork import planeat
- from pyerualjetwork import data_operations
- from pyerualjetwork import model_operations
-
- from pyerualjetwork import plan_cuda
- from pyerualjetwork import planeat_cuda
- from pyerualjetwork import data_operations_cuda
- from pyerualjetwork import model_operations_cuda
-
- 'use if your data huge: _afterburner package (afterburner package comes with powerful paralellism, afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded. Specially designed for LLM training and other massive model training)'
-
- from pyerualjetwork_afterburner import plan
- from pyerualjetwork_afterburner import planeat
- from pyerualjetwork_afterburner import data_operations
- from pyerualjetwork_afterburner import model_operations
-
- from pyerualjetwork_afterburner import plan_cuda
- from pyerualjetwork_afterburner import planeat_cuda
- from pyerualjetwork_afterburner import data_operations_cuda
- from pyerualjetwork_afterburner import model_operations_cuda
+ 'use this if your data small or memory management is a problem :'
+
+ from pyerualjetwork import plan
+ from pyerualjetwork import planeat
+ from pyerualjetwork import data_operations
+ from pyerualjetwork import model_operations
+
+ from pyerualjetwork import plan_cuda
+ from pyerualjetwork import planeat_cuda
+ from pyerualjetwork import data_operations_cuda
+ from pyerualjetwork import model_operations_cuda
+
+ 'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
+ afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
+ Specially designed for LLM training and other massive model training)'
+
+ from pyerualjetwork_afterburner import plan
+ from pyerualjetwork_afterburner import planeat
+ from pyerualjetwork_afterburner import data_operations
+ from pyerualjetwork_afterburner import model_operations
+
+ from pyerualjetwork_afterburner import plan_cuda
+ from pyerualjetwork_afterburner import planeat_cuda
+ from pyerualjetwork_afterburner import data_operations_cuda
+ from pyerualjetwork_afterburner import model_operations_cuda

  Optimized for Visual Studio Code

{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork.egg-info/SOURCES.txt

@@ -1,4 +1,5 @@
  README.md
+ setup.cfg
  setup.py
  pyerualjetwork/__init__.py
  pyerualjetwork/activation_functions.py
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/__init__.py

@@ -1,4 +1,4 @@
- __version__ = "4.3.2.2b0-afterburner"
+ __version__ = "4.3.4-afterburner"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
pyerualjetwork-4.3.4/pyerualjetwork_afterburner/activation_functions.py

@@ -0,0 +1,290 @@
+ import numpy as np
+ from scipy.special import expit, softmax
+ import warnings
+
+
+ # ACTIVATION FUNCTIONS -----
+
+ def all_activations():
+
+ activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
+
+ return activations_list
+
+ def spiral_activation(x):
+
+ r = np.sqrt(np.sum(x**2))
+
+ theta = np.arctan2(x[1:], x[:-1])
+
+ spiral_x = r * np.cos(theta + r)
+ spiral_y = r * np.sin(theta + r)
+
+
+ spiral_output = np.concatenate(([spiral_x[0]], spiral_y))
+
+ return spiral_output
+
+
+ def Softmax(
+ x # num: Input data to be transformed using softmax function.
+ ):
+ """
+ Applies the softmax function to the input data.
+
+ Args:
+ (num): Input data to be transformed using softmax function.
+
+ Returns:
+ (num): Transformed data after applying softmax function.
+ """
+
+ return softmax(x)
+
+
+ def Sigmoid(
+ x # num: Input data to be transformed using sigmoid function.
+ ):
+ """
+ Applies the sigmoid function to the input data.
+
+ Args:
+ (num): Input data to be transformed using sigmoid function.
+
+ Returns:
+ (num): Transformed data after applying sigmoid function.
+ """
+ return expit(x)
+
+
+ def Relu(
+ x # num: Input data to be transformed using ReLU function.
+ ):
+ """
+ Applies the Rectified Linear Unit (ReLU) function to the input data.
+
+ Args:
+ (num): Input data to be transformed using ReLU function.
+
+ Returns:
+ (num): Transformed data after applying ReLU function.
+ """
+
+ return np.maximum(0, x)
+
+
+ def tanh(x):
+ return np.tanh(x)
+
+ def swish(x):
+ return x * (1 / (1 + np.exp(-x)))
+
+ def sin_plus(x):
+ return (np.sin(x) + 1) / 2
+
+ def modular_circular_activation(x, period=2*np.pi):
+ return np.mod(x, period) / period
+
+ def tanh_circular_activation(x):
+ return (np.tanh(x) + 1) / 2
+
+ def leaky_relu(x, alpha=0.01):
+ return np.where(x > 0, x, alpha * x)
+
+ def softplus(x):
+ return np.log(1 + np.exp(x))
+
+ def elu(x, alpha=1.0):
+ return np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ def gelu(x):
+ return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+
+ def selu(x, lambda_=1.0507, alpha=1.6733):
+ return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ def sinakt(x):
+ return np.sin(x) + np.cos(x)
+
+ def p_squared(x, alpha=1.0, beta=0.0):
+ return alpha * x**2 + beta * x
+
+ def sglu(x, alpha=1.0):
+ return softmax(alpha * x) * x
+
+ # 4. Double Leaky ReLU (DLReLU)
+ def dlrelu(x):
+ return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)
+
+ # 5. Exponential Sigmoid (ExSig)
+ def exsig(x):
+ return 1 / (1 + np.exp(-x**2))
+
+ # 6. Adaptive Cosine Activation (ACos)
+ def acos(x, alpha=1.0, beta=0.0):
+ return np.cos(alpha * x + beta)
+
+ # 7. Gaussian-like Activation (GLA)
+ def gla(x, alpha=1.0, mu=0.0):
+ return np.exp(-alpha * (x - mu)**2)
+
+ # 8. Swish ReLU (SReLU)
+ def srelu(x):
+ return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)
+
+ # 9. Quadratic Exponential Linear Unit (QELU)
+ def qelu(x):
+ return x**2 * np.exp(x) - 1
+
+ # 10. Inverse Square Root Activation (ISRA)
+ def isra(x):
+ return x / np.sqrt(np.abs(x) + 1)
+
+ def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
+ return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)
+
+ def arctan(x):
+ return np.arctan(x)
+
+ def bent_identity(x):
+ return (np.sqrt(x**2 + 1) - 1) / 2 + x
+
+ def circular_activation(x, scale=2.0, frequency=1.0, shift=0.0):
+
+ n_features = x.shape[0]
+
+ circular_output = np.zeros_like(x)
+
+ for i in range(n_features):
+
+ r = np.sqrt(np.sum(x**2))
+ theta = 2 * np.pi * (i / n_features) + shift
+
+ circular_x = r * np.cos(theta + frequency * r) * scale
+ circular_y = r * np.sin(theta + frequency * r) * scale
+
+ if i % 2 == 0:
+ circular_output[i] = circular_x
+ else:
+ circular_output[i] = circular_y
+
+ return circular_output
+
+ def sech(x):
+ return 2 / (np.exp(x) + np.exp(-x))
+
+ def softsign(x):
+ return x / (1 + np.abs(x))
+
+ def pwl(x, alpha=0.5, beta=1.5):
+ return np.where(x <= 0, alpha * x, beta * x)
+
+ def cubic(x):
+ return x**3
+
+ def gaussian(x, alpha=1.0, mu=0.0):
+ return np.exp(-alpha * (x - mu)**2)
+
+ def sine(x, alpha=1.0):
+ return np.sin(alpha * x)
+
+ def tanh_square(x):
+ return np.tanh(x)**2
+
+ def mod_sigmoid(x, alpha=1.0, beta=0.0):
+ return 1 / (1 + np.exp(-alpha * x + beta))
+
+ def quartic(x):
+ return x**4
+
+ def square_quartic(x):
+ return (x**2)**2
+
+ def cubic_quadratic(x):
+ return x**3 * (x**2)
+
+ def exp_cubic(x):
+ return np.exp(x**3)
+
+ def sine_square(x):
+ return np.sin(x)**2
+
+ def logarithmic(x):
+ return np.log(x**2 + 1)
+
+ def scaled_cubic(x, alpha=1.0):
+ return alpha * x**3
+
+ def sine_offset(x, beta=0.0):
+ return np.sin(x + beta)
+
+
+ def apply_activation(Input, activation_list):
+ """
+ Applies activation functions for inputs
+
+ Args:
+ Input (numpy.ndarray):
+ activation_list (list):
+ """
+ origin_input = np.copy(Input)
+
+ activation_functions = {
+ 'sigmoid': Sigmoid,
+ 'swish': swish,
+ 'mod_circular': modular_circular_activation,
+ 'tanh_circular': tanh_circular_activation,
+ 'leaky_relu': leaky_relu,
+ 'relu': Relu,
+ 'softplus': softplus,
+ 'elu': elu,
+ 'gelu': gelu,
+ 'selu': selu,
+ 'tanh': tanh,
+ 'sinakt': sinakt,
+ 'p_squared': p_squared,
+ 'sglu': lambda x: sglu(x, alpha=1.0),
+ 'dlrelu': dlrelu,
+ 'exsig': exsig,
+ 'sin_plus': sin_plus,
+ 'acos': lambda x: acos(x, alpha=1.0, beta=0.0),
+ 'gla': lambda x: gla(x, alpha=1.0, mu=0.0),
+ 'srelu': srelu,
+ 'qelu': qelu,
+ 'isra': isra,
+ 'waveakt': waveakt,
+ 'arctan': arctan,
+ 'bent_identity': bent_identity,
+ 'sech': sech,
+ 'softsign': softsign,
+ 'pwl': pwl,
+ 'cubic': cubic,
+ 'gaussian': gaussian,
+ 'sine': sine,
+ 'tanh_square': tanh_square,
+ 'mod_sigmoid': mod_sigmoid,
+ 'linear': lambda x: x,
+ 'quartic': quartic,
+ 'square_quartic': square_quartic,
+ 'cubic_quadratic': cubic_quadratic,
+ 'exp_cubic': exp_cubic,
+ 'sine_square': sine_square,
+ 'logarithmic': logarithmic,
+ 'scaled_cubic': lambda x: scaled_cubic(x, 1.0),
+ 'sine_offset': lambda x: sine_offset(x, 1.0),
+ 'spiral': spiral_activation,
+ 'circular': circular_activation
+ }
+
+ try:
+
+ valid_mask = np.array([act in activation_functions for act in activation_list])
+ valid_activations = np.array(activation_list)[valid_mask]
+
+ activation_outputs = np.array([activation_functions[act](origin_input) for act in valid_activations])
+
+ return Input + np.sum(activation_outputs, axis=0)
+
+ except Exception as e:
+ warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
+ return Input
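A minimal usage sketch for the new module above; the import path follows the file list in this diff, and the input values are made up for illustration:

import numpy as np
from pyerualjetwork_afterburner.activation_functions import all_activations, apply_activation

x = np.random.randn(8).astype(np.float32)        # made-up sample input
print(all_activations()[:5])                      # first few supported activation names
y = apply_activation(x, ['tanh', 'sigmoid'])      # x + tanh(x) + expit(x)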
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/activation_functions_cuda.py

@@ -276,14 +276,13 @@ def apply_activation(Input, activation_list):
  }

  try:
- valid_activations = [act for act in activation_list if act in activation_functions]

- activation_outputs = cp.stack([activation_functions[act](origin_input)
- for act in valid_activations])
+ valid_mask = cp.array([act in activation_functions for act in activation_list])
+ valid_activations = cp.array(activation_list)[valid_mask]

- result = Input + cp.sum(activation_outputs, axis=0)
-
- return result
+ activation_outputs = cp.array([activation_functions[act](origin_input) for act in valid_activations])
+
+ return Input + cp.sum(activation_outputs, axis=0)

  except Exception as e:
  warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/data_operations_cuda.py

@@ -17,7 +17,7 @@ def encode_one_hot(y_train, y_test=None, summary=False):
  tuple: One-hot encoded y_train and (if given) y_test.
  """

- from .memory_operations import optimize_labels, transfer_to_cpu
+ from memory_operations import optimize_labels, transfer_to_cpu

  y_train = optimize_labels(y_train, one_hot_encoded=False, cuda=True)
  y_test = optimize_labels(y_test, one_hot_encoded=False, cuda=True)
@@ -401,7 +401,7 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=cp.floa

  return scaled_data # sample data scaled

-
+
  def normalization(
  Input, # num: Input data to be normalized.
  dtype=cp.float32
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/plan_cuda.py

@@ -23,7 +23,7 @@ from .data_operations_cuda import normalization
  from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
  from .activation_functions_cuda import apply_activation, all_activations
  from .metrics_cuda import metrics
- from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
+ from model_operations_cuda import get_acc, get_preds, get_preds_softmax
  from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
  from .visualizations_cuda import (
  draw_neural_web,
@@ -151,11 +151,12 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  """

- from .planeat_cuda import define_genomes
+ from planeat_cuda import define_genomes

  data = 'Train'

- activation_potentiation = all_activations()
+ activation_potentiation = all_activations()
+
  activation_potentiation_len = len(activation_potentiation)

  y_train = optimize_labels(y_train, cuda=True)
@@ -164,13 +165,13 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  x_train = transfer_to_gpu(x_train, dtype=dtype)
  y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

- from .data_operations_cuda import batcher
+ from data_operations_cuda import batcher

  elif memory == 'cpu':
  x_train = transfer_to_cpu(x_train, dtype=dtype)
  y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

- from .data_operations import batcher
+ from data_operations import batcher

  else:
  raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
@@ -228,7 +229,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
  weight_pop.append(W)

- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j])
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
  acc = model[get_acc()]

  if strategy == 'accuracy': target_pop.append(acc)
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/pyerualjetwork_afterburner/planeat_cuda.py

@@ -721,9 +721,8 @@ def mutation(weight,
  max_threshold = len(activations)

  new_threshold = threshold
-
- except_this = ['spiral', 'circular']
- all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+
+ all_acts = all_activations()

  activation_add_prob = 1 - activation_add_prob
  activation_delete_prob = 1 - activation_delete_prob
{pyerualjetwork-4.3.3 → pyerualjetwork-4.3.4}/setup.py

@@ -6,7 +6,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
  # Setting Up
  setup(
  name="pyerualjetwork",
- version="4.3.3",
+ version="4.3.4",
  author="Hasan Can Beydili",
  author_email="tchasancan@gmail.com",
  description=(
pyerualjetwork-4.3.3/pyerualjetwork/activation_functions.py

@@ -1,343 +0,0 @@
- import numpy as np
- from scipy.special import expit, softmax
- import warnings
-
-
- # ACTIVATION FUNCTIONS -----
-
- def all_activations():
-
- activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
-
- return activations_list
-
- def spiral_activation(x):
-
- r = np.sqrt(np.sum(x**2))
-
- theta = np.arctan2(x[1:], x[:-1])
-
- spiral_x = r * np.cos(theta + r)
- spiral_y = r * np.sin(theta + r)
-
-
- spiral_output = np.concatenate(([spiral_x[0]], spiral_y))
-
- return spiral_output
-
-
- def Softmax(
- x # num: Input data to be transformed using softmax function.
- ):
- """
- Applies the softmax function to the input data.
-
- Args:
- (num): Input data to be transformed using softmax function.
-
- Returns:
- (num): Transformed data after applying softmax function.
- """
-
- return softmax(x)
-
-
- def Sigmoid(
- x # num: Input data to be transformed using sigmoid function.
- ):
- """
- Applies the sigmoid function to the input data.
-
- Args:
- (num): Input data to be transformed using sigmoid function.
-
- Returns:
- (num): Transformed data after applying sigmoid function.
- """
- return expit(x)
-
-
- def Relu(
- x # num: Input data to be transformed using ReLU function.
- ):
- """
- Applies the Rectified Linear Unit (ReLU) function to the input data.
-
- Args:
- (num): Input data to be transformed using ReLU function.
-
- Returns:
- (num): Transformed data after applying ReLU function.
- """
-
- return np.maximum(0, x)
-
-
- def tanh(x):
- return np.tanh(x)
-
- def swish(x):
- return x * (1 / (1 + np.exp(-x)))
-
- def sin_plus(x):
- return (np.sin(x) + 1) / 2
-
- def modular_circular_activation(x, period=2*np.pi):
- return np.mod(x, period) / period
-
- def tanh_circular_activation(x):
- return (np.tanh(x) + 1) / 2
-
- def leaky_relu(x, alpha=0.01):
- return np.where(x > 0, x, alpha * x)
-
- def softplus(x):
- return np.log(1 + np.exp(x))
-
- def elu(x, alpha=1.0):
- return np.where(x > 0, x, alpha * (np.exp(x) - 1))
-
- def gelu(x):
- return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
-
- def selu(x, lambda_=1.0507, alpha=1.6733):
- return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
-
- def sinakt(x):
- return np.sin(x) + np.cos(x)
-
- def p_squared(x, alpha=1.0, beta=0.0):
- return alpha * x**2 + beta * x
-
- def sglu(x, alpha=1.0):
- return softmax(alpha * x) * x
-
- # 4. Double Leaky ReLU (DLReLU)
- def dlrelu(x):
- return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)
-
- # 5. Exponential Sigmoid (ExSig)
- def exsig(x):
- return 1 / (1 + np.exp(-x**2))
-
- # 6. Adaptive Cosine Activation (ACos)
- def acos(x, alpha=1.0, beta=0.0):
- return np.cos(alpha * x + beta)
-
- # 7. Gaussian-like Activation (GLA)
- def gla(x, alpha=1.0, mu=0.0):
- return np.exp(-alpha * (x - mu)**2)
-
- # 8. Swish ReLU (SReLU)
- def srelu(x):
- return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)
-
- # 9. Quadratic Exponential Linear Unit (QELU)
- def qelu(x):
- return x**2 * np.exp(x) - 1
-
- # 10. Inverse Square Root Activation (ISRA)
- def isra(x):
- return x / np.sqrt(np.abs(x) + 1)
-
- def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
- return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)
-
- def arctan(x):
- return np.arctan(x)
-
- def bent_identity(x):
- return (np.sqrt(x**2 + 1) - 1) / 2 + x
-
- def circular_activation(x, scale=2.0, frequency=1.0, shift=0.0):
-
- n_features = x.shape[0]
-
- circular_output = np.zeros_like(x)
-
- for i in range(n_features):
-
- r = np.sqrt(np.sum(x**2))
- theta = 2 * np.pi * (i / n_features) + shift
-
- circular_x = r * np.cos(theta + frequency * r) * scale
- circular_y = r * np.sin(theta + frequency * r) * scale
-
- if i % 2 == 0:
- circular_output[i] = circular_x
- else:
- circular_output[i] = circular_y
-
- return circular_output
-
- def sech(x):
- return 2 / (np.exp(x) + np.exp(-x))
-
- def softsign(x):
- return x / (1 + np.abs(x))
-
- def pwl(x, alpha=0.5, beta=1.5):
- return np.where(x <= 0, alpha * x, beta * x)
-
- def cubic(x):
- return x**3
-
- def gaussian(x, alpha=1.0, mu=0.0):
- return np.exp(-alpha * (x - mu)**2)
-
- def sine(x, alpha=1.0):
- return np.sin(alpha * x)
-
- def tanh_square(x):
- return np.tanh(x)**2
-
- def mod_sigmoid(x, alpha=1.0, beta=0.0):
- return 1 / (1 + np.exp(-alpha * x + beta))
-
- def quartic(x):
- return x**4
-
- def square_quartic(x):
- return (x**2)**2
-
- def cubic_quadratic(x):
- return x**3 * (x**2)
-
- def exp_cubic(x):
- return np.exp(x**3)
-
- def sine_square(x):
- return np.sin(x)**2
-
- def logarithmic(x):
- return np.log(x**2 + 1)
-
- def scaled_cubic(x, alpha=1.0):
- return alpha * x**3
-
- def sine_offset(x, beta=0.0):
- return np.sin(x + beta)
-
-
- def safe_add(current_sum, new_value):
- try:
- return current_sum + new_value
- except OverflowError:
- return np.array(current_sum) + np.array(new_value)
-
-
- def apply_activation(Input, activation_list):
- """
- Applies a sequence of activation functions to the input.
-
- Args:
- Input (numpy.ndarray): The input to apply activations to.
- activation_list (list): A list of activation function names to apply.
-
- Returns:
- numpy.ndarray: The input after all activations have been applied.
- """
-
- origin_input = np.copy(Input)
-
- for i in range(len(activation_list)):
- try:
- if activation_list[i] == 'sigmoid':
- Input = safe_add(Input, Sigmoid(origin_input))
- elif activation_list[i] == 'swish':
- Input = safe_add(Input, swish(origin_input))
- elif activation_list[i] == 'mod_circular':
- Input = safe_add(Input, modular_circular_activation(origin_input))
- elif activation_list[i] == 'tanh_circular':
- Input = safe_add(Input, tanh_circular_activation(origin_input))
- elif activation_list[i] == 'leaky_relu':
- Input = safe_add(Input, leaky_relu(origin_input))
- elif activation_list[i] == 'relu':
- Input = safe_add(Input, Relu(origin_input))
- elif activation_list[i] == 'softplus':
- Input = safe_add(Input, softplus(origin_input))
- elif activation_list[i] == 'elu':
- Input = safe_add(Input, elu(origin_input))
- elif activation_list[i] == 'gelu':
- Input = safe_add(Input, gelu(origin_input))
- elif activation_list[i] == 'selu':
- Input = safe_add(Input, selu(origin_input))
- elif activation_list[i] == 'tanh':
- Input = safe_add(Input, tanh(origin_input))
- elif activation_list[i] == 'sinakt':
- Input = safe_add(Input, sinakt(origin_input))
- elif activation_list[i] == 'p_squared':
- Input = safe_add(Input, p_squared(origin_input))
- elif activation_list[i] == 'sglu':
- Input = safe_add(Input, sglu(origin_input, alpha=1.0))
- elif activation_list[i] == 'dlrelu':
- Input = safe_add(Input, dlrelu(origin_input))
- elif activation_list[i] == 'exsig':
- Input = safe_add(Input, exsig(origin_input))
- elif activation_list[i] == 'sin_plus':
- Input = safe_add(Input, sin_plus(origin_input))
- elif activation_list[i] == 'acos':
- Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
- elif activation_list[i] == 'gla':
- Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
- elif activation_list[i] == 'srelu':
- Input = safe_add(Input, srelu(origin_input))
- elif activation_list[i] == 'qelu':
- Input = safe_add(Input, qelu(origin_input))
- elif activation_list[i] == 'isra':
- Input = safe_add(Input, isra(origin_input))
- elif activation_list[i] == 'waveakt':
- Input = safe_add(Input, waveakt(origin_input))
- elif activation_list[i] == 'arctan':
- Input = safe_add(Input, arctan(origin_input))
- elif activation_list[i] == 'bent_identity':
- Input = safe_add(Input, bent_identity(origin_input))
- elif activation_list[i] == 'sech':
- Input = safe_add(Input, sech(origin_input))
- elif activation_list[i] == 'softsign':
- Input = safe_add(Input, softsign(origin_input))
- elif activation_list[i] == 'pwl':
- Input = safe_add(Input, pwl(origin_input))
- elif activation_list[i] == 'cubic':
- Input = safe_add(Input, cubic(origin_input))
- elif activation_list[i] == 'gaussian':
- Input = safe_add(Input, gaussian(origin_input))
- elif activation_list[i] == 'sine':
- Input = safe_add(Input, sine(origin_input))
- elif activation_list[i] == 'tanh_square':
- Input = safe_add(Input, tanh_square(origin_input))
- elif activation_list[i] == 'mod_sigmoid':
- Input = safe_add(Input, mod_sigmoid(origin_input))
- elif activation_list[i] == 'linear':
- Input = safe_add(Input, origin_input)
- elif activation_list[i] == 'quartic':
- Input = safe_add(Input, quartic(origin_input))
- elif activation_list[i] == 'square_quartic':
- Input = safe_add(Input, square_quartic(origin_input))
- elif activation_list[i] == 'cubic_quadratic':
- Input = safe_add(Input, cubic_quadratic(origin_input))
- elif activation_list[i] == 'exp_cubic':
- Input = safe_add(Input, exp_cubic(origin_input))
- elif activation_list[i] == 'sine_square':
- Input = safe_add(Input, sine_square(origin_input))
- elif activation_list[i] == 'logarithmic':
- Input = safe_add(Input, logarithmic(origin_input))
- elif activation_list[i] == 'scaled_cubic':
- Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
- elif activation_list[i] == 'sine_offset':
- Input = safe_add(Input, sine_offset(origin_input, 1.0))
- elif activation_list[i] == 'spiral':
- Input = safe_add(Input, spiral_activation(origin_input))
- elif activation_list[i] == 'circular':
- Input = safe_add(Input, circular_activation(origin_input))
-
-
- except Exception as e:
- warnings.warn(f"Error in activation {activation_list[i]}: {str(e)}", RuntimeWarning)
- if not isinstance(Input, np.ndarray):
- Input = np.array(Input)
- if not isinstance(origin_input, np.ndarray):
- origin_input = np.array(origin_input)
- continue
-
- return Input