pyerualjetwork 4.2.9b7__py3-none-any.whl → 4.3.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerualjetwork-4.2.9b7.dist-info → pyerualjetwork-4.3.0.1.dist-info}/METADATA +2 -1
- pyerualjetwork-4.3.0.1.dist-info/RECORD +24 -0
- pyerualjetwork-4.3.0.1.dist-info/top_level.txt +1 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/__init__.py +1 -1
- pyerualjetwork-jetstorm/activation_functions.py +291 -0
- pyerualjetwork-jetstorm/activation_functions_cuda.py +290 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/data_operations.py +2 -3
- {pyerualjetwork → pyerualjetwork-jetstorm}/model_operations.py +14 -14
- {pyerualjetwork → pyerualjetwork-jetstorm}/model_operations_cuda.py +16 -17
- {pyerualjetwork → pyerualjetwork-jetstorm}/plan.py +46 -248
- {pyerualjetwork → pyerualjetwork-jetstorm}/plan_cuda.py +44 -263
- {pyerualjetwork → pyerualjetwork-jetstorm}/planeat.py +14 -47
- {pyerualjetwork → pyerualjetwork-jetstorm}/planeat_cuda.py +11 -48
- pyerualjetwork/activation_functions.py +0 -343
- pyerualjetwork/activation_functions_cuda.py +0 -341
- pyerualjetwork-4.2.9b7.dist-info/RECORD +0 -24
- pyerualjetwork-4.2.9b7.dist-info/top_level.txt +0 -1
- {pyerualjetwork-4.2.9b7.dist-info → pyerualjetwork-4.3.0.1.dist-info}/WHEEL +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/data_operations_cuda.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/help.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/loss_functions.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/loss_functions_cuda.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/memory_operations.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/metrics.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/metrics_cuda.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/ui.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/visualizations.py +0 -0
- {pyerualjetwork → pyerualjetwork-jetstorm}/visualizations_cuda.py +0 -0
@@ -16,11 +16,9 @@ import cupy as cp
 import numpy as np
 import random
 import math
-import copy
 
 
 ### LIBRARY IMPORTS ###
-from .plan_cuda import feed_forward
 from .data_operations_cuda import normalization
 from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
@@ -279,7 +277,7 @@ def evolver(weights,
 
     good_activations = list(activation_potentiations[slice_center:])
     bad_activations = list(activation_potentiations[:slice_center])
-    best_activations =
+    best_activations = good_activations[-1].copy() if isinstance(good_activations[-1], list) else good_activations[-1]
 
 
     ### PLANEAT IS APPLIED ACCORDING TO THE SPECIFIED POLICY, STRATEGY, AND PROBABILITY CONFIGURATION:
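The new `best_activations` assignment takes the top-ranked genome's activation entry, which may be either a single activation name (a string) or a list of names, and copies it only in the list case. A minimal sketch in plain Python, with illustrative values, of why that conditional copy matters:

```python
# Illustrative values: an activation entry is either a plain string or a
# list of activation names (as in planeat's activation_potentiations).
good_activations = ['relu', ['tanh', 'swish'], ['gelu', 'sine']]

# Copy only when the entry is a list; strings are immutable, so sharing is safe.
best_activations = (good_activations[-1].copy()
                    if isinstance(good_activations[-1], list)
                    else good_activations[-1])

best_activations.append('spiral')   # later mutation touches only the working copy
print(good_activations[-1])         # ['gelu', 'sine'] -- the elite record is untouched
```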
@@ -293,10 +291,10 @@ def evolver(weights,
     epsilon = cp.finfo(float).eps
 
     child_W = cp.copy(bad_weights)
-    child_act = copy
+    child_act = bad_activations.copy()
 
     mutated_W = cp.copy(bad_weights)
-    mutated_act = copy
+    mutated_act = bad_activations.copy()
 
 
     for i in range(len(bad_weights)):
@@ -400,7 +398,7 @@ def evolver(weights,
     return weights, activation_potentiations
 
 
-def evaluate(x_population, weights, activation_potentiations
+def evaluate(x_population, weights, activation_potentiations):
     """
     Evaluates the performance of a population of genomes, applying different activation functions
     and weights depending on whether reinforcement learning mode is enabled or not.
@@ -415,64 +413,29 @@ def evaluate(x_population, weights, activation_potentiations, rl_mode=False, dty
         activation_potentiations (list or str): A list where each entry represents an activation function
                                                 or a potentiation strategy applied to each genome. If only one
                                                 activation function is used, this can be a single string.
-
-        rl_mode (bool, optional): If True, reinforcement learning mode is activated, this accepts x_population is a single genome. (Also weights and activation_potentations a single genomes part.)
-                                  Default is False.
-
-
-        dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
     Returns:
         list: A list of outputs corresponding to each genome in the population after applying the respective
               activation function and weights.
 
-    Notes:
-        - If `rl_mode` is True:
-            - Accepts x_population is a single genom
-            - The inputs are flattened, and the activation function is applied across the single genom.
-
-        - If `rl_mode` is False:
-            - Accepts x_population is a list of genomes
-            - Each genome is processed individually, and the results are stored in the `outputs` list.
-
-        - `feed_forward()` function is the core function that processes the input with the given weights and activation function.
-
     Example:
         ```python
-        outputs = evaluate(x_population, weights, activation_potentiations
+        outputs = evaluate(x_population, weights, activation_potentiations)
         ```
 
     - The function returns a list of outputs after processing the population, where each element corresponds to
       the output for each genome in `x_population`.
     """
-
-    ### IF RL_MODE IS TRUE, A SINGLE GENOME IS ASSUMED AS INPUT, A FEEDFORWARD PREDICTION IS MADE, AND THE OUTPUT(NPARRAY) IS RETURNED:
-
-    ### IF RL_MODE IS FALSE, PREDICTIONS ARE MADE FOR ALL GENOMES IN THE GROUP USING THEIR CORRESPONDING INDEXED INPUTS AND DATA.
     ### THE OUTPUTS ARE RETURNED AS A PYTHON LIST, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
 
-    if
-
-        Input = Input.ravel()
-
-        if isinstance(activation_potentiations, str):
-            activation_potentiations = [activation_potentiations]
-
-        outputs = feed_forward(Input=Input, is_training=False, activation_potentiation=activation_potentiations, w=weights)
-
+    if isinstance(activation_potentiations, str):
+        activation_potentiations = [activation_potentiations]
     else:
-
-        for i, genome in enumerate(x_population):
-
-            Input = cp.array(genome)
-            Input = Input.ravel()
-
-            if isinstance(activation_potentiations[i], str):
-                activation_potentiations[i] = [activation_potentiations[i]]
+        activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]
 
-
+    x_population = apply_activation(x_population, activation_potentiations)
+    result = x_population @ weights.T
 
-    return
+    return result
 
 
 def cross_over(first_parent_W,
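These hunks appear to come from planeat_cuda.py, judging by the `cp` (CuPy) usage and the CUDA helper imports. The rewritten `evaluate` drops the per-genome `feed_forward` loop: activation entries are normalized to lists, `apply_activation` runs over the whole population at once, and the weights are folded in with a single matrix product. A rough NumPy sketch of the new data flow, with a stand-in activation step and made-up shapes (the real function works on CuPy arrays and calls `activation_functions_cuda.apply_activation`):

```python
import numpy as np

def apply_activation(x, activation_potentiations):
    # Stand-in for activation_functions_cuda.apply_activation: only tanh here,
    # so the sketch just shows the shapes and the overall data flow.
    return np.tanh(x)

x_population = np.random.rand(5, 8)   # illustrative: 5 rows x 8 features
weights = np.random.rand(3, 8)        # illustrative: 3 outputs x 8 features
activation_potentiations = 'tanh'     # a single string is also accepted

if isinstance(activation_potentiations, str):
    activation_potentiations = [activation_potentiations]
else:
    activation_potentiations = [item if isinstance(item, list) else [item]
                                for item in activation_potentiations]

x_population = apply_activation(x_population, activation_potentiations)
result = x_population @ weights.T     # one matmul instead of a Python loop
print(result.shape)                   # (5, 3)
```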
@@ -1,343 +0,0 @@
-import numpy as np
-from scipy.special import expit, softmax
-import warnings
-
-
-# ACTIVATION FUNCTIONS -----
-
-def all_activations():
-
-    activations_list = ['linear', 'sigmoid', 'relu', 'tanh', 'circular', 'spiral', 'swish', 'sin_plus', 'mod_circular', 'tanh_circular', 'leaky_relu', 'softplus', 'elu', 'gelu', 'selu', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset']
-
-    return activations_list
-
-def spiral_activation(x):
-
-    r = np.sqrt(np.sum(x**2))
-
-    theta = np.arctan2(x[1:], x[:-1])
-
-    spiral_x = r * np.cos(theta + r)
-    spiral_y = r * np.sin(theta + r)
-
-
-    spiral_output = np.concatenate(([spiral_x[0]], spiral_y))
-
-    return spiral_output
-
-
-def Softmax(
-    x  # num: Input data to be transformed using softmax function.
-):
-    """
-    Applies the softmax function to the input data.
-
-    Args:
-        (num): Input data to be transformed using softmax function.
-
-    Returns:
-        (num): Transformed data after applying softmax function.
-    """
-
-    return softmax(x)
-
-
-def Sigmoid(
-    x  # num: Input data to be transformed using sigmoid function.
-):
-    """
-    Applies the sigmoid function to the input data.
-
-    Args:
-        (num): Input data to be transformed using sigmoid function.
-
-    Returns:
-        (num): Transformed data after applying sigmoid function.
-    """
-    return expit(x)
-
-
-def Relu(
-    x  # num: Input data to be transformed using ReLU function.
-):
-    """
-    Applies the Rectified Linear Unit (ReLU) function to the input data.
-
-    Args:
-        (num): Input data to be transformed using ReLU function.
-
-    Returns:
-        (num): Transformed data after applying ReLU function.
-    """
-
-    return np.maximum(0, x)
-
-
-def tanh(x):
-    return np.tanh(x)
-
-def swish(x):
-    return x * (1 / (1 + np.exp(-x)))
-
-def sin_plus(x):
-    return (np.sin(x) + 1) / 2
-
-def modular_circular_activation(x, period=2*np.pi):
-    return np.mod(x, period) / period
-
-def tanh_circular_activation(x):
-    return (np.tanh(x) + 1) / 2
-
-def leaky_relu(x, alpha=0.01):
-    return np.where(x > 0, x, alpha * x)
-
-def softplus(x):
-    return np.log(1 + np.exp(x))
-
-def elu(x, alpha=1.0):
-    return np.where(x > 0, x, alpha * (np.exp(x) - 1))
-
-def gelu(x):
-    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
-
-def selu(x, lambda_=1.0507, alpha=1.6733):
-    return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
-
-def sinakt(x):
-    return np.sin(x) + np.cos(x)
-
-def p_squared(x, alpha=1.0, beta=0.0):
-    return alpha * x**2 + beta * x
-
-def sglu(x, alpha=1.0):
-    return softmax(alpha * x) * x
-
-# 4. Double Leaky ReLU (DLReLU)
-def dlrelu(x):
-    return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)
-
-# 5. Exponential Sigmoid (ExSig)
-def exsig(x):
-    return 1 / (1 + np.exp(-x**2))
-
-# 6. Adaptive Cosine Activation (ACos)
-def acos(x, alpha=1.0, beta=0.0):
-    return np.cos(alpha * x + beta)
-
-# 7. Gaussian-like Activation (GLA)
-def gla(x, alpha=1.0, mu=0.0):
-    return np.exp(-alpha * (x - mu)**2)
-
-# 8. Swish ReLU (SReLU)
-def srelu(x):
-    return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)
-
-# 9. Quadratic Exponential Linear Unit (QELU)
-def qelu(x):
-    return x**2 * np.exp(x) - 1
-
-# 10. Inverse Square Root Activation (ISRA)
-def isra(x):
-    return x / np.sqrt(np.abs(x) + 1)
-
-def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
-    return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)
-
-def arctan(x):
-    return np.arctan(x)
-
-def bent_identity(x):
-    return (np.sqrt(x**2 + 1) - 1) / 2 + x
-
-def circular_activation(x, scale=2.0, frequency=1.0, shift=0.0):
-
-    n_features = x.shape[0]
-
-    circular_output = np.zeros_like(x)
-
-    for i in range(n_features):
-
-        r = np.sqrt(np.sum(x**2))
-        theta = 2 * np.pi * (i / n_features) + shift
-
-        circular_x = r * np.cos(theta + frequency * r) * scale
-        circular_y = r * np.sin(theta + frequency * r) * scale
-
-        if i % 2 == 0:
-            circular_output[i] = circular_x
-        else:
-            circular_output[i] = circular_y
-
-    return circular_output
-
-def sech(x):
-    return 2 / (np.exp(x) + np.exp(-x))
-
-def softsign(x):
-    return x / (1 + np.abs(x))
-
-def pwl(x, alpha=0.5, beta=1.5):
-    return np.where(x <= 0, alpha * x, beta * x)
-
-def cubic(x):
-    return x**3
-
-def gaussian(x, alpha=1.0, mu=0.0):
-    return np.exp(-alpha * (x - mu)**2)
-
-def sine(x, alpha=1.0):
-    return np.sin(alpha * x)
-
-def tanh_square(x):
-    return np.tanh(x)**2
-
-def mod_sigmoid(x, alpha=1.0, beta=0.0):
-    return 1 / (1 + np.exp(-alpha * x + beta))
-
-def quartic(x):
-    return x**4
-
-def square_quartic(x):
-    return (x**2)**2
-
-def cubic_quadratic(x):
-    return x**3 * (x**2)
-
-def exp_cubic(x):
-    return np.exp(x**3)
-
-def sine_square(x):
-    return np.sin(x)**2
-
-def logarithmic(x):
-    return np.log(x**2 + 1)
-
-def scaled_cubic(x, alpha=1.0):
-    return alpha * x**3
-
-def sine_offset(x, beta=0.0):
-    return np.sin(x + beta)
-
-
-def safe_add(current_sum, new_value):
-    try:
-        return current_sum + new_value
-    except OverflowError:
-        return np.array(current_sum) + np.array(new_value)
-
-
-def apply_activation(Input, activation_list):
-    """
-    Applies a sequence of activation functions to the input.
-
-    Args:
-        Input (numpy.ndarray): The input to apply activations to.
-        activation_list (list): A list of activation function names to apply.
-
-    Returns:
-        numpy.ndarray: The input after all activations have been applied.
-    """
-
-    origin_input = np.copy(Input)
-
-    for i in range(len(activation_list)):
-        try:
-            if activation_list[i] == 'sigmoid':
-                Input = safe_add(Input, Sigmoid(origin_input))
-            elif activation_list[i] == 'swish':
-                Input = safe_add(Input, swish(origin_input))
-            elif activation_list[i] == 'mod_circular':
-                Input = safe_add(Input, modular_circular_activation(origin_input))
-            elif activation_list[i] == 'tanh_circular':
-                Input = safe_add(Input, tanh_circular_activation(origin_input))
-            elif activation_list[i] == 'leaky_relu':
-                Input = safe_add(Input, leaky_relu(origin_input))
-            elif activation_list[i] == 'relu':
-                Input = safe_add(Input, Relu(origin_input))
-            elif activation_list[i] == 'softplus':
-                Input = safe_add(Input, softplus(origin_input))
-            elif activation_list[i] == 'elu':
-                Input = safe_add(Input, elu(origin_input))
-            elif activation_list[i] == 'gelu':
-                Input = safe_add(Input, gelu(origin_input))
-            elif activation_list[i] == 'selu':
-                Input = safe_add(Input, selu(origin_input))
-            elif activation_list[i] == 'tanh':
-                Input = safe_add(Input, tanh(origin_input))
-            elif activation_list[i] == 'sinakt':
-                Input = safe_add(Input, sinakt(origin_input))
-            elif activation_list[i] == 'p_squared':
-                Input = safe_add(Input, p_squared(origin_input))
-            elif activation_list[i] == 'sglu':
-                Input = safe_add(Input, sglu(origin_input, alpha=1.0))
-            elif activation_list[i] == 'dlrelu':
-                Input = safe_add(Input, dlrelu(origin_input))
-            elif activation_list[i] == 'exsig':
-                Input = safe_add(Input, exsig(origin_input))
-            elif activation_list[i] == 'sin_plus':
-                Input = safe_add(Input, sin_plus(origin_input))
-            elif activation_list[i] == 'acos':
-                Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
-            elif activation_list[i] == 'gla':
-                Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
-            elif activation_list[i] == 'srelu':
-                Input = safe_add(Input, srelu(origin_input))
-            elif activation_list[i] == 'qelu':
-                Input = safe_add(Input, qelu(origin_input))
-            elif activation_list[i] == 'isra':
-                Input = safe_add(Input, isra(origin_input))
-            elif activation_list[i] == 'waveakt':
-                Input = safe_add(Input, waveakt(origin_input))
-            elif activation_list[i] == 'arctan':
-                Input = safe_add(Input, arctan(origin_input))
-            elif activation_list[i] == 'bent_identity':
-                Input = safe_add(Input, bent_identity(origin_input))
-            elif activation_list[i] == 'sech':
-                Input = safe_add(Input, sech(origin_input))
-            elif activation_list[i] == 'softsign':
-                Input = safe_add(Input, softsign(origin_input))
-            elif activation_list[i] == 'pwl':
-                Input = safe_add(Input, pwl(origin_input))
-            elif activation_list[i] == 'cubic':
-                Input = safe_add(Input, cubic(origin_input))
-            elif activation_list[i] == 'gaussian':
-                Input = safe_add(Input, gaussian(origin_input))
-            elif activation_list[i] == 'sine':
-                Input = safe_add(Input, sine(origin_input))
-            elif activation_list[i] == 'tanh_square':
-                Input = safe_add(Input, tanh_square(origin_input))
-            elif activation_list[i] == 'mod_sigmoid':
-                Input = safe_add(Input, mod_sigmoid(origin_input))
-            elif activation_list[i] == 'linear':
-                Input = safe_add(Input, origin_input)
-            elif activation_list[i] == 'quartic':
-                Input = safe_add(Input, quartic(origin_input))
-            elif activation_list[i] == 'square_quartic':
-                Input = safe_add(Input, square_quartic(origin_input))
-            elif activation_list[i] == 'cubic_quadratic':
-                Input = safe_add(Input, cubic_quadratic(origin_input))
-            elif activation_list[i] == 'exp_cubic':
-                Input = safe_add(Input, exp_cubic(origin_input))
-            elif activation_list[i] == 'sine_square':
-                Input = safe_add(Input, sine_square(origin_input))
-            elif activation_list[i] == 'logarithmic':
-                Input = safe_add(Input, logarithmic(origin_input))
-            elif activation_list[i] == 'scaled_cubic':
-                Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
-            elif activation_list[i] == 'sine_offset':
-                Input = safe_add(Input, sine_offset(origin_input, 1.0))
-            elif activation_list[i] == 'spiral':
-                Input = safe_add(Input, spiral_activation(origin_input))
-            elif activation_list[i] == 'circular':
-                Input = safe_add(Input, circular_activation(origin_input))
-
-
-        except Exception as e:
-            warnings.warn(f"Error in activation {activation_list[i]}: {str(e)}", RuntimeWarning)
-            if not isinstance(Input, np.ndarray):
-                Input = np.array(Input)
-            if not isinstance(origin_input, np.ndarray):
-                origin_input = np.array(origin_input)
-            continue
-
-    return Input
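For reference, the removed NumPy module (evidently pyerualjetwork/activation_functions.py, matching the +0 -343 entry in the file list) accumulated activations rather than chaining them: each named function was evaluated on a copy of the original input, and its output was added onto the running `Input` via `safe_add`. A condensed sketch of that accumulation pattern, keeping only two of the branches and dropping the overflow handling:

```python
import numpy as np
from scipy.special import expit

def Sigmoid(x):
    return expit(x)

def swish(x):
    return x * (1 / (1 + np.exp(-x)))

def apply_activation(Input, activation_list):
    # Condensed restatement of the deleted function: every activation sees the
    # original input, and each result is summed onto the running total.
    origin_input = np.copy(Input)
    table = {'sigmoid': Sigmoid, 'swish': swish, 'linear': lambda x: x}
    for name in activation_list:
        Input = Input + table[name](origin_input)
    return Input

x = np.array([-1.0, 0.0, 1.0])
print(apply_activation(x, ['sigmoid', 'swish']))   # x + sigmoid(x) + swish(x)
```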