pyerualjetwork 4.3.2.1.tar.gz → 4.3.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. {pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/PKG-INFO +17 -4
  2. {pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/README.md +16 -3
  3. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/__init__.py +1 -1
  4. pyerualjetwork-4.3.3/pyerualjetwork/activation_functions.py +343 -0
  5. pyerualjetwork-4.3.3/pyerualjetwork/activation_functions_cuda.py +340 -0
  6. pyerualjetwork-4.3.3/pyerualjetwork/model_operations.py +408 -0
  7. pyerualjetwork-4.3.3/pyerualjetwork/model_operations_cuda.py +421 -0
  8. pyerualjetwork-4.3.3/pyerualjetwork/plan.py +627 -0
  9. pyerualjetwork-4.3.3/pyerualjetwork/plan_cuda.py +651 -0
  10. pyerualjetwork-4.3.3/pyerualjetwork/planeat.py +825 -0
  11. pyerualjetwork-4.3.3/pyerualjetwork/planeat_cuda.py +834 -0
  12. {pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/pyerualjetwork.egg-info/PKG-INFO +17 -4
  13. pyerualjetwork-4.3.3/pyerualjetwork.egg-info/SOURCES.txt +46 -0
  14. pyerualjetwork-4.3.3/pyerualjetwork.egg-info/top_level.txt +2 -0
  15. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/__init__.py +11 -0
  16. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/data_operations.py +406 -0
  17. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/data_operations_cuda.py +461 -0
  18. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/help.py +17 -0
  19. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/loss_functions.py +21 -0
  20. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/loss_functions_cuda.py +21 -0
  21. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/memory_operations.py +298 -0
  22. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/metrics.py +190 -0
  23. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/metrics_cuda.py +163 -0
  24. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/ui.py +22 -0
  25. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/visualizations.py +823 -0
  26. pyerualjetwork-4.3.3/pyerualjetwork_afterburner/visualizations_cuda.py +825 -0
  27. {pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/setup.py +1 -1
  28. pyerualjetwork-4.3.2.1/pyerualjetwork.egg-info/SOURCES.txt +0 -26
  29. pyerualjetwork-4.3.2.1/pyerualjetwork.egg-info/top_level.txt +0 -1
  30. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/data_operations.py +0 -0
  31. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/data_operations_cuda.py +0 -0
  32. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/help.py +0 -0
  33. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/loss_functions.py +0 -0
  34. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/loss_functions_cuda.py +0 -0
  35. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/memory_operations.py +0 -0
  36. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/metrics.py +0 -0
  37. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/metrics_cuda.py +0 -0
  38. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/ui.py +0 -0
  39. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/visualizations.py +0 -0
  40. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/visualizations_cuda.py +0 -0
  41. {pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/pyerualjetwork.egg-info/dependency_links.txt +0 -0
  42. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/activation_functions.py +0 -0
  43. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/activation_functions_cuda.py +0 -0
  44. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/model_operations.py +0 -0
  45. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/model_operations_cuda.py +0 -0
  46. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/plan.py +0 -0
  47. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/plan_cuda.py +0 -0
  48. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/planeat.py +0 -0
  49. {pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork_afterburner}/planeat_cuda.py +0 -0
  50. {pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/setup.cfg +0 -0
{pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.2.1
+ Version: 4.3.3
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -23,19 +23,32 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
  GitHub Page: https://github.com/HCB06/PyerualJetwork


- pip install pyerualjetwork==x.x.x (means it is a normal package)
- pip install pyerualjetwork==x.x.x.x (last x means it is a afterburner package)
+ pip install pyerualjetwork

+ 'use if your data small, medium or large:'
+
  from pyerualjetwork import plan
  from pyerualjetwork import planeat
  from pyerualjetwork import data_operations
  from pyerualjetwork import model_operations
-
+
  from pyerualjetwork import plan_cuda
  from pyerualjetwork import planeat_cuda
  from pyerualjetwork import data_operations_cuda
  from pyerualjetwork import model_operations_cuda

+ 'use if your data huge: _afterburner package (afterburner package comes with powerful paralellism, afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded. Specially designed for LLM training and other massive model training)'
+
+ from pyerualjetwork_afterburner import plan
+ from pyerualjetwork_afterburner import planeat
+ from pyerualjetwork_afterburner import data_operations
+ from pyerualjetwork_afterburner import model_operations
+
+ from pyerualjetwork_afterburner import plan_cuda
+ from pyerualjetwork_afterburner import planeat_cuda
+ from pyerualjetwork_afterburner import data_operations_cuda
+ from pyerualjetwork_afterburner import model_operations_cuda
+
  Optimized for Visual Studio Code

  requires=[
{pyerualjetwork-4.3.2.1 → pyerualjetwork-4.3.3}/README.md
@@ -14,19 +14,32 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
  GitHub Page: https://github.com/HCB06/PyerualJetwork


- pip install pyerualjetwork==x.x.x (means it is a normal package)
- pip install pyerualjetwork==x.x.x.x (last x means it is a afterburner package)
+ pip install pyerualjetwork

+ 'use if your data small, medium or large:'
+
  from pyerualjetwork import plan
  from pyerualjetwork import planeat
  from pyerualjetwork import data_operations
  from pyerualjetwork import model_operations
-
+
  from pyerualjetwork import plan_cuda
  from pyerualjetwork import planeat_cuda
  from pyerualjetwork import data_operations_cuda
  from pyerualjetwork import model_operations_cuda

+ 'use if your data huge: _afterburner package (afterburner package comes with powerful paralellism, afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded. Specially designed for LLM training and other massive model training)'
+
+ from pyerualjetwork_afterburner import plan
+ from pyerualjetwork_afterburner import planeat
+ from pyerualjetwork_afterburner import data_operations
+ from pyerualjetwork_afterburner import model_operations
+
+ from pyerualjetwork_afterburner import plan_cuda
+ from pyerualjetwork_afterburner import planeat_cuda
+ from pyerualjetwork_afterburner import data_operations_cuda
+ from pyerualjetwork_afterburner import model_operations_cuda
+
  Optimized for Visual Studio Code

  requires=[
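
The install and import text above describes two parallel package trees with identical module names. A minimal sketch of the selection pattern it implies, assuming both builds install cleanly from PyPI (the try/except fallback is illustrative, not part of the README):

    # Prefer the afterburner build for huge datasets, fall back to the
    # standard build otherwise; module names are identical in both trees.
    try:
        from pyerualjetwork_afterburner import plan, planeat
    except ImportError:
        from pyerualjetwork import plan, planeat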
{pyerualjetwork-4.3.2.1/pyerualjetwork-afterburner → pyerualjetwork-4.3.3/pyerualjetwork}/__init__.py
@@ -1,4 +1,4 @@
- __version__ = "4.3.2.1-afterburner"
+ __version__ = "4.3.3"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
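
Since the merged __init__.py above exposes a plain version string, a quick sanity check after upgrading is to read it back. A minimal sketch, assuming a standard install:

    import pyerualjetwork

    print(pyerualjetwork.__version__)  # expected: "4.3.3"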
pyerualjetwork-4.3.3/pyerualjetwork/activation_functions.py
@@ -0,0 +1,343 @@
+ import numpy as np
+ from scipy.special import expit, softmax
+ import warnings
+
+
+ # ACTIVATION FUNCTIONS -----
+
+ def all_activations():
+
+     activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
+
+     return activations_list
+
+ def spiral_activation(x):
+
+     r = np.sqrt(np.sum(x**2))
+
+     theta = np.arctan2(x[1:], x[:-1])
+
+     spiral_x = r * np.cos(theta + r)
+     spiral_y = r * np.sin(theta + r)
+
+
+     spiral_output = np.concatenate(([spiral_x[0]], spiral_y))
+
+     return spiral_output
+
+
+ def Softmax(
+     x  # num: Input data to be transformed using softmax function.
+ ):
+     """
+     Applies the softmax function to the input data.
+
+     Args:
+         (num): Input data to be transformed using softmax function.
+
+     Returns:
+         (num): Transformed data after applying softmax function.
+     """
+
+     return softmax(x)
+
+
+ def Sigmoid(
+     x  # num: Input data to be transformed using sigmoid function.
+ ):
+     """
+     Applies the sigmoid function to the input data.
+
+     Args:
+         (num): Input data to be transformed using sigmoid function.
+
+     Returns:
+         (num): Transformed data after applying sigmoid function.
+     """
+     return expit(x)
+
+
+ def Relu(
+     x  # num: Input data to be transformed using ReLU function.
+ ):
+     """
+     Applies the Rectified Linear Unit (ReLU) function to the input data.
+
+     Args:
+         (num): Input data to be transformed using ReLU function.
+
+     Returns:
+         (num): Transformed data after applying ReLU function.
+     """
+
+     return np.maximum(0, x)
+
+
+ def tanh(x):
+     return np.tanh(x)
+
+ def swish(x):
+     return x * (1 / (1 + np.exp(-x)))
+
+ def sin_plus(x):
+     return (np.sin(x) + 1) / 2
+
+ def modular_circular_activation(x, period=2*np.pi):
+     return np.mod(x, period) / period
+
+ def tanh_circular_activation(x):
+     return (np.tanh(x) + 1) / 2
+
+ def leaky_relu(x, alpha=0.01):
+     return np.where(x > 0, x, alpha * x)
+
+ def softplus(x):
+     return np.log(1 + np.exp(x))
+
+ def elu(x, alpha=1.0):
+     return np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ def gelu(x):
+     return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+
+ def selu(x, lambda_=1.0507, alpha=1.6733):
+     return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ def sinakt(x):
+     return np.sin(x) + np.cos(x)
+
+ def p_squared(x, alpha=1.0, beta=0.0):
+     return alpha * x**2 + beta * x
+
+ def sglu(x, alpha=1.0):
+     return softmax(alpha * x) * x
+
+ # 4. Double Leaky ReLU (DLReLU)
+ def dlrelu(x):
+     return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)
+
+ # 5. Exponential Sigmoid (ExSig)
+ def exsig(x):
+     return 1 / (1 + np.exp(-x**2))
+
+ # 6. Adaptive Cosine Activation (ACos)
+ def acos(x, alpha=1.0, beta=0.0):
+     return np.cos(alpha * x + beta)
+
+ # 7. Gaussian-like Activation (GLA)
+ def gla(x, alpha=1.0, mu=0.0):
+     return np.exp(-alpha * (x - mu)**2)
+
+ # 8. Swish ReLU (SReLU)
+ def srelu(x):
+     return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)
+
+ # 9. Quadratic Exponential Linear Unit (QELU)
+ def qelu(x):
+     return x**2 * np.exp(x) - 1
+
+ # 10. Inverse Square Root Activation (ISRA)
+ def isra(x):
+     return x / np.sqrt(np.abs(x) + 1)
+
+ def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
+     return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)
+
+ def arctan(x):
+     return np.arctan(x)
+
+ def bent_identity(x):
+     return (np.sqrt(x**2 + 1) - 1) / 2 + x
+
+ def circular_activation(x, scale=2.0, frequency=1.0, shift=0.0):
+
+     n_features = x.shape[0]
+
+     circular_output = np.zeros_like(x)
+
+     for i in range(n_features):
+
+         r = np.sqrt(np.sum(x**2))
+         theta = 2 * np.pi * (i / n_features) + shift
+
+         circular_x = r * np.cos(theta + frequency * r) * scale
+         circular_y = r * np.sin(theta + frequency * r) * scale
+
+         if i % 2 == 0:
+             circular_output[i] = circular_x
+         else:
+             circular_output[i] = circular_y
+
+     return circular_output
+
+ def sech(x):
+     return 2 / (np.exp(x) + np.exp(-x))
+
+ def softsign(x):
+     return x / (1 + np.abs(x))
+
+ def pwl(x, alpha=0.5, beta=1.5):
+     return np.where(x <= 0, alpha * x, beta * x)
+
+ def cubic(x):
+     return x**3
+
+ def gaussian(x, alpha=1.0, mu=0.0):
+     return np.exp(-alpha * (x - mu)**2)
+
+ def sine(x, alpha=1.0):
+     return np.sin(alpha * x)
+
+ def tanh_square(x):
+     return np.tanh(x)**2
+
+ def mod_sigmoid(x, alpha=1.0, beta=0.0):
+     return 1 / (1 + np.exp(-alpha * x + beta))
+
+ def quartic(x):
+     return x**4
+
+ def square_quartic(x):
+     return (x**2)**2
+
+ def cubic_quadratic(x):
+     return x**3 * (x**2)
+
+ def exp_cubic(x):
+     return np.exp(x**3)
+
+ def sine_square(x):
+     return np.sin(x)**2
+
+ def logarithmic(x):
+     return np.log(x**2 + 1)
+
+ def scaled_cubic(x, alpha=1.0):
+     return alpha * x**3
+
+ def sine_offset(x, beta=0.0):
+     return np.sin(x + beta)
+
+
+ def safe_add(current_sum, new_value):
+     try:
+         return current_sum + new_value
+     except OverflowError:
+         return np.array(current_sum) + np.array(new_value)
+
+
+ def apply_activation(Input, activation_list):
+     """
+     Applies a sequence of activation functions to the input.
+
+     Args:
+         Input (numpy.ndarray): The input to apply activations to.
+         activation_list (list): A list of activation function names to apply.
+
+     Returns:
+         numpy.ndarray: The input after all activations have been applied.
+     """
+
+     origin_input = np.copy(Input)
+
+     for i in range(len(activation_list)):
+         try:
+             if activation_list[i] == 'sigmoid':
+                 Input = safe_add(Input, Sigmoid(origin_input))
+             elif activation_list[i] == 'swish':
+                 Input = safe_add(Input, swish(origin_input))
+             elif activation_list[i] == 'mod_circular':
+                 Input = safe_add(Input, modular_circular_activation(origin_input))
+             elif activation_list[i] == 'tanh_circular':
+                 Input = safe_add(Input, tanh_circular_activation(origin_input))
+             elif activation_list[i] == 'leaky_relu':
+                 Input = safe_add(Input, leaky_relu(origin_input))
+             elif activation_list[i] == 'relu':
+                 Input = safe_add(Input, Relu(origin_input))
+             elif activation_list[i] == 'softplus':
+                 Input = safe_add(Input, softplus(origin_input))
+             elif activation_list[i] == 'elu':
+                 Input = safe_add(Input, elu(origin_input))
+             elif activation_list[i] == 'gelu':
+                 Input = safe_add(Input, gelu(origin_input))
+             elif activation_list[i] == 'selu':
+                 Input = safe_add(Input, selu(origin_input))
+             elif activation_list[i] == 'tanh':
+                 Input = safe_add(Input, tanh(origin_input))
+             elif activation_list[i] == 'sinakt':
+                 Input = safe_add(Input, sinakt(origin_input))
+             elif activation_list[i] == 'p_squared':
+                 Input = safe_add(Input, p_squared(origin_input))
+             elif activation_list[i] == 'sglu':
+                 Input = safe_add(Input, sglu(origin_input, alpha=1.0))
+             elif activation_list[i] == 'dlrelu':
+                 Input = safe_add(Input, dlrelu(origin_input))
+             elif activation_list[i] == 'exsig':
+                 Input = safe_add(Input, exsig(origin_input))
+             elif activation_list[i] == 'sin_plus':
+                 Input = safe_add(Input, sin_plus(origin_input))
+             elif activation_list[i] == 'acos':
+                 Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
+             elif activation_list[i] == 'gla':
+                 Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
+             elif activation_list[i] == 'srelu':
+                 Input = safe_add(Input, srelu(origin_input))
+             elif activation_list[i] == 'qelu':
+                 Input = safe_add(Input, qelu(origin_input))
+             elif activation_list[i] == 'isra':
+                 Input = safe_add(Input, isra(origin_input))
+             elif activation_list[i] == 'waveakt':
+                 Input = safe_add(Input, waveakt(origin_input))
+             elif activation_list[i] == 'arctan':
+                 Input = safe_add(Input, arctan(origin_input))
+             elif activation_list[i] == 'bent_identity':
+                 Input = safe_add(Input, bent_identity(origin_input))
+             elif activation_list[i] == 'sech':
+                 Input = safe_add(Input, sech(origin_input))
+             elif activation_list[i] == 'softsign':
+                 Input = safe_add(Input, softsign(origin_input))
+             elif activation_list[i] == 'pwl':
+                 Input = safe_add(Input, pwl(origin_input))
+             elif activation_list[i] == 'cubic':
+                 Input = safe_add(Input, cubic(origin_input))
+             elif activation_list[i] == 'gaussian':
+                 Input = safe_add(Input, gaussian(origin_input))
+             elif activation_list[i] == 'sine':
+                 Input = safe_add(Input, sine(origin_input))
+             elif activation_list[i] == 'tanh_square':
+                 Input = safe_add(Input, tanh_square(origin_input))
+             elif activation_list[i] == 'mod_sigmoid':
+                 Input = safe_add(Input, mod_sigmoid(origin_input))
+             elif activation_list[i] == 'linear':
+                 Input = safe_add(Input, origin_input)
+             elif activation_list[i] == 'quartic':
+                 Input = safe_add(Input, quartic(origin_input))
+             elif activation_list[i] == 'square_quartic':
+                 Input = safe_add(Input, square_quartic(origin_input))
+             elif activation_list[i] == 'cubic_quadratic':
+                 Input = safe_add(Input, cubic_quadratic(origin_input))
+             elif activation_list[i] == 'exp_cubic':
+                 Input = safe_add(Input, exp_cubic(origin_input))
+             elif activation_list[i] == 'sine_square':
+                 Input = safe_add(Input, sine_square(origin_input))
+             elif activation_list[i] == 'logarithmic':
+                 Input = safe_add(Input, logarithmic(origin_input))
+             elif activation_list[i] == 'scaled_cubic':
+                 Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
+             elif activation_list[i] == 'sine_offset':
+                 Input = safe_add(Input, sine_offset(origin_input, 1.0))
+             elif activation_list[i] == 'spiral':
+                 Input = safe_add(Input, spiral_activation(origin_input))
+             elif activation_list[i] == 'circular':
+                 Input = safe_add(Input, circular_activation(origin_input))
+
+
+         except Exception as e:
+             warnings.warn(f"Error in activation {activation_list[i]}: {str(e)}", RuntimeWarning)
+             if not isinstance(Input, np.ndarray):
+                 Input = np.array(Input)
+             if not isinstance(origin_input, np.ndarray):
+                 origin_input = np.array(origin_input)
+             continue
+
+     return Input
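
Note that apply_activation is additive rather than compositional: every listed activation is evaluated on the original input (origin_input) and accumulated onto the running value, so ['tanh', 'relu'] yields x + tanh(x) + relu(x), and 'linear' adds x itself. A minimal usage sketch, assuming the module is importable from the installed package under the path shown in the file list:

    import numpy as np
    from pyerualjetwork.activation_functions import apply_activation, all_activations

    x = np.array([0.5, -1.2, 3.0])

    # Each name sees the original x and is added onto the running sum,
    # so out == x + tanh(x) + relu(x).
    out = apply_activation(x, ['tanh', 'relu'])
    print(out)

    print(len(all_activations()))  # 44 activation names are registered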