pyerualjetwork 4.3.7b3__py3-none-any.whl → 4.3.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +2 -2
- pyerualjetwork/activation_functions_cuda.py +63 -114
- pyerualjetwork/data_operations_cuda.py +1 -1
- pyerualjetwork/model_operations.py +14 -14
- pyerualjetwork/model_operations_cuda.py +16 -17
- pyerualjetwork/parallel.py +118 -0
- pyerualjetwork/plan.py +61 -256
- pyerualjetwork/plan_cuda.py +60 -267
- pyerualjetwork/planeat.py +12 -44
- pyerualjetwork/planeat_cuda.py +9 -45
- pyerualjetwork/visualizations.py +29 -26
- pyerualjetwork/visualizations_cuda.py +20 -22
- {pyerualjetwork-4.3.7b3.dist-info → pyerualjetwork-4.3.8.dist-info}/METADATA +2 -19
- pyerualjetwork-4.3.8.dist-info/RECORD +25 -0
- pyerualjetwork-4.3.8.dist-info/top_level.txt +1 -0
- pyerualjetwork-4.3.7b3.dist-info/RECORD +0 -44
- pyerualjetwork-4.3.7b3.dist-info/top_level.txt +0 -2
- pyerualjetwork_afterburner/__init__.py +0 -11
- pyerualjetwork_afterburner/activation_functions.py +0 -290
- pyerualjetwork_afterburner/activation_functions_cuda.py +0 -289
- pyerualjetwork_afterburner/data_operations.py +0 -406
- pyerualjetwork_afterburner/data_operations_cuda.py +0 -461
- pyerualjetwork_afterburner/help.py +0 -17
- pyerualjetwork_afterburner/loss_functions.py +0 -21
- pyerualjetwork_afterburner/loss_functions_cuda.py +0 -21
- pyerualjetwork_afterburner/memory_operations.py +0 -298
- pyerualjetwork_afterburner/metrics.py +0 -190
- pyerualjetwork_afterburner/metrics_cuda.py +0 -163
- pyerualjetwork_afterburner/model_operations.py +0 -408
- pyerualjetwork_afterburner/model_operations_cuda.py +0 -420
- pyerualjetwork_afterburner/plan.py +0 -425
- pyerualjetwork_afterburner/plan_cuda.py +0 -436
- pyerualjetwork_afterburner/planeat.py +0 -793
- pyerualjetwork_afterburner/planeat_cuda.py +0 -797
- pyerualjetwork_afterburner/ui.py +0 -22
- pyerualjetwork_afterburner/visualizations.py +0 -823
- pyerualjetwork_afterburner/visualizations_cuda.py +0 -825
- {pyerualjetwork-4.3.7b3.dist-info → pyerualjetwork-4.3.8.dist-info}/WHEEL +0 -0
pyerualjetwork/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-__version__ = "4.3.7b3"
+__version__ = "4.3.8"
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
pyerualjetwork/activation_functions_cuda.py
CHANGED
@@ -217,124 +217,73 @@ def scaled_cubic(x, alpha=1.0):
 def sine_offset(x, beta=0.0):
     return cp.sin(x + beta)
 
-
-def safe_add(current_sum, new_value):
-    try:
-        return current_sum + new_value
-    except OverflowError:
-        return cp.array(current_sum) + cp.array(new_value)
 
 def apply_activation(Input, activation_list):
     """
-
-
-    Args:
-        Input (cupy.ndarray): The input to apply activations to.
-        activation_list (list): A list of activation function names to apply.
-
-    Returns:
-        cupy.ndarray: The input after all activations have been applied.
-    """
+    Applies activation functions for inputs
 
+    Args:
+        Input (cupy.ndarray):
+        activation_list (list):
+    """
     origin_input = cp.copy(Input)
+
+    activation_functions = {
+        'sigmoid': Sigmoid,
+        'swish': swish,
+        'mod_circular': modular_circular_activation,
+        'tanh_circular': tanh_circular_activation,
+        'leaky_relu': leaky_relu,
+        'relu': Relu,
+        'softplus': softplus,
+        'elu': elu,
+        'gelu': gelu,
+        'selu': selu,
+        'tanh': tanh,
+        'sinakt': sinakt,
+        'p_squared': p_squared,
+        'sglu': lambda x: sglu(x, alpha=1.0),
+        'dlrelu': dlrelu,
+        'exsig': exsig,
+        'sin_plus': sin_plus,
+        'acos': lambda x: acos(x, alpha=1.0, beta=0.0),
+        'gla': lambda x: gla(x, alpha=1.0, mu=0.0),
+        'srelu': srelu,
+        'qelu': qelu,
+        'isra': isra,
+        'waveakt': waveakt,
+        'arctan': arctan,
+        'bent_identity': bent_identity,
+        'sech': sech,
+        'softsign': softsign,
+        'pwl': pwl,
+        'cubic': cubic,
+        'gaussian': gaussian,
+        'sine': sine,
+        'tanh_square': tanh_square,
+        'mod_sigmoid': mod_sigmoid,
+        'linear': lambda x: x,
+        'quartic': quartic,
+        'square_quartic': square_quartic,
+        'cubic_quadratic': cubic_quadratic,
+        'exp_cubic': exp_cubic,
+        'sine_square': sine_square,
+        'logarithmic': logarithmic,
+        'scaled_cubic': lambda x: scaled_cubic(x, 1.0),
+        'sine_offset': lambda x: sine_offset(x, 1.0),
+        'spiral': spiral_activation,
+        'circular': circular_activation
+    }
+
+    try:
+
+        valid_mask = cp.array([act in activation_functions for act in activation_list])
+        valid_activations = cp.array(activation_list)[valid_mask]
+
+        activation_outputs = cp.array([activation_functions[act](origin_input) for act in valid_activations])
 
-
-    try:
-        if activation_list[i] == 'sigmoid':
-            Input = safe_add(Input, Sigmoid(origin_input))
-        elif activation_list[i] == 'swish':
-            Input = safe_add(Input, swish(origin_input))
-        elif activation_list[i] == 'mod_circular':
-            Input = safe_add(Input, modular_circular_activation(origin_input))
-        elif activation_list[i] == 'tanh_circular':
-            Input = safe_add(Input, tanh_circular_activation(origin_input))
-        elif activation_list[i] == 'leaky_relu':
-            Input = safe_add(Input, leaky_relu(origin_input))
-        elif activation_list[i] == 'relu':
-            Input = safe_add(Input, Relu(origin_input))
-        elif activation_list[i] == 'softplus':
-            Input = safe_add(Input, softplus(origin_input))
-        elif activation_list[i] == 'elu':
-            Input = safe_add(Input, elu(origin_input))
-        elif activation_list[i] == 'gelu':
-            Input = safe_add(Input, gelu(origin_input))
-        elif activation_list[i] == 'selu':
-            Input = safe_add(Input, selu(origin_input))
-        elif activation_list[i] == 'tanh':
-            Input = safe_add(Input, tanh(origin_input))
-        elif activation_list[i] == 'sinakt':
-            Input = safe_add(Input, sinakt(origin_input))
-        elif activation_list[i] == 'p_squared':
-            Input = safe_add(Input, p_squared(origin_input))
-        elif activation_list[i] == 'sglu':
-            Input = safe_add(Input, sglu(origin_input, alpha=1.0))
-        elif activation_list[i] == 'dlrelu':
-            Input = safe_add(Input, dlrelu(origin_input))
-        elif activation_list[i] == 'exsig':
-            Input = safe_add(Input, exsig(origin_input))
-        elif activation_list[i] == 'sin_plus':
-            Input = safe_add(Input, sin_plus(origin_input))
-        elif activation_list[i] == 'acos':
-            Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
-        elif activation_list[i] == 'gla':
-            Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
-        elif activation_list[i] == 'srelu':
-            Input = safe_add(Input, srelu(origin_input))
-        elif activation_list[i] == 'qelu':
-            Input = safe_add(Input, qelu(origin_input))
-        elif activation_list[i] == 'isra':
-            Input = safe_add(Input, isra(origin_input))
-        elif activation_list[i] == 'waveakt':
-            Input = safe_add(Input, waveakt(origin_input))
-        elif activation_list[i] == 'arctan':
-            Input = safe_add(Input, arctan(origin_input))
-        elif activation_list[i] == 'bent_identity':
-            Input = safe_add(Input, bent_identity(origin_input))
-        elif activation_list[i] == 'sech':
-            Input = safe_add(Input, sech(origin_input))
-        elif activation_list[i] == 'softsign':
-            Input = safe_add(Input, softsign(origin_input))
-        elif activation_list[i] == 'pwl':
-            Input = safe_add(Input, pwl(origin_input))
-        elif activation_list[i] == 'cubic':
-            Input = safe_add(Input, cubic(origin_input))
-        elif activation_list[i] == 'gaussian':
-            Input = safe_add(Input, gaussian(origin_input))
-        elif activation_list[i] == 'sine':
-            Input = safe_add(Input, sine(origin_input))
-        elif activation_list[i] == 'tanh_square':
-            Input = safe_add(Input, tanh_square(origin_input))
-        elif activation_list[i] == 'mod_sigmoid':
-            Input = safe_add(Input, mod_sigmoid(origin_input))
-        elif activation_list[i] == 'linear':
-            Input = safe_add(Input, origin_input)
-        elif activation_list[i] == 'quartic':
-            Input = safe_add(Input, quartic(origin_input))
-        elif activation_list[i] == 'square_quartic':
-            Input = safe_add(Input, square_quartic(origin_input))
-        elif activation_list[i] == 'cubic_quadratic':
-            Input = safe_add(Input, cubic_quadratic(origin_input))
-        elif activation_list[i] == 'exp_cubic':
-            Input = safe_add(Input, exp_cubic(origin_input))
-        elif activation_list[i] == 'sine_square':
-            Input = safe_add(Input, sine_square(origin_input))
-        elif activation_list[i] == 'logarithmic':
-            Input = safe_add(Input, logarithmic(origin_input))
-        elif activation_list[i] == 'scaled_cubic':
-            Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
-        elif activation_list[i] == 'sine_offset':
-            Input = safe_add(Input, sine_offset(origin_input, 1.0))
-        elif activation_list[i] == 'spiral':
-            Input = safe_add(Input, spiral_activation(origin_input))
-        elif activation_list[i] == 'circular':
-            Input = safe_add(Input, circular_activation(origin_input))
+        return Input + cp.sum(activation_outputs, axis=0)
 
-
-
-
-            Input = cp.array(Input)
-            if not isinstance(origin_input, cp.ndarray):
-                origin_input = cp.array(origin_input)
-            continue
-
-    return Input
+    except Exception as e:
+        warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
+        return Input
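The net effect of this hunk: the per-name if/elif chain built on safe_add is replaced by a dictionary dispatch. Every requested activation is looked up in activation_functions, evaluated against an untouched copy of the input, and the results are summed in a single vectorized reduction. A minimal sketch of that pattern, reduced to NumPy and a three-entry table for illustration (the real table and function names are the ones in the diff above):

import numpy as np

# Illustrative stand-in for the CuPy activation table in the diff.
ACTIVATIONS = {
    'sigmoid': lambda x: 1.0 / (1.0 + np.exp(-x)),
    'tanh': np.tanh,
    'linear': lambda x: x,
}

def apply_activation(inp, activation_list):
    origin = inp.copy()
    # Keep only names the table knows about, mirroring valid_mask above.
    valid = [act for act in activation_list if act in ACTIVATIONS]
    # Evaluate each activation on the original input, then sum all outputs
    # in one reduction instead of branching through a safe_add chain.
    outputs = np.array([ACTIVATIONS[act](origin) for act in valid])
    return inp + outputs.sum(axis=0)

print(apply_activation(np.array([0.0, 1.0]), ['sigmoid', 'tanh']))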
pyerualjetwork/model_operations.py
CHANGED
@@ -258,7 +258,7 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
     ndarray: Output from the model.
     """
 
-    from .
+    from .activation_functions import apply_activation
     from .data_operations import standard_scaler
 
     model = load_model(model_name, model_path)
@@ -269,12 +269,12 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
 
     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
 
-
-
-    neural_layer = neural_layer.ravel()
+    Input = np.array(Input, dtype=dtype, copy=False)
+    Input = Input.ravel()
 
     try:
-
+        Input = apply_activation(Input, activation_potentiation)
+        neural_layer = Input @ W.T
         return neural_layer
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
@@ -304,7 +304,7 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=np.float32):
     W = model[get_weights()]
 
     try:
-        Input =
+        Input = W.T @ output
         return Input
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_ssd." + Style.RESET_ALL)
@@ -334,18 +334,18 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], dtype=np.float32):
     ndarray: Output from the model.
     """
 
-    from data_operations import standard_scaler
-    from
+    from .data_operations import standard_scaler
+    from .activation_functions import apply_activation
 
     Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
 
+    Input = np.array(Input, dtype=dtype, copy=False)
+    Input = Input.ravel()
+
    try:
 
-
-        neural_layer =
-        neural_layer = neural_layer.ravel()
-
-        neural_layer = feed_forward(neural_layer, np.copy(W.astype(dtype, copy=False)), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+        Input = apply_activation(Input, activation_potentiation)
+        neural_layer = Input @ W.T
 
         return neural_layer
 
@@ -371,7 +371,7 @@ def reverse_predict_model_ram(output, W, dtype=np.float32):
     """
 
     try:
-        Input =
+        Input = W.T @ output
         return Input
 
     except:
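Both prediction paths now reduce to the same three steps: scale, flatten, then apply the activation stack followed by a single matrix product against the stored weights. A minimal sketch of that forward pass, under the assumption (implied by Input @ W.T) that W has shape (classes, features); the inline activation table is a stand-in for apply_activation:

import numpy as np

def predict(Input, W, activations=('tanh',)):
    table = {'linear': lambda x: x, 'tanh': np.tanh}  # stand-in activation table
    Input = np.array(Input, dtype=np.float32, copy=False).ravel()
    origin = Input.copy()
    # Add each activation of the original input, as apply_activation does.
    Input = Input + sum(table[a](origin) for a in activations)
    # One matrix product replaces the old feed_forward call.
    return Input @ W.T

W = np.random.rand(3, 4).astype(np.float32)  # hypothetical weights: 3 classes, 4 features
print(predict(np.ones(4), W))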
pyerualjetwork/model_operations_cuda.py
CHANGED
@@ -263,10 +263,10 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
     Returns:
     ndarray: Output from the model.
     """
-
+
     Input = cp.array(Input, dtype=dtype, copy=False)
-
-    from .
+
+    from .activation_functions_cuda import apply_activation
     from .data_operations_cuda import standard_scaler
 
     model = load_model(model_name, model_path)
@@ -277,13 +277,14 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
 
     Input = standard_scaler(None, Input, scaler_params)
 
-
-
-    neural_layer = neural_layer.ravel()
+    Input = cp.array(Input, dtype=dtype, copy=False)
+    Input = Input.ravel()
 
     try:
-
+        Input = apply_activation(Input, activation_potentiation)
+        neural_layer = Input @ W.T
         return neural_layer
+
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: predict_model_ssd." + Style.RESET_ALL)
         sys.exit()
@@ -314,7 +315,7 @@ def reverse_predict_model_ssd(output, model_name, model_path='', dtype=cp.float32):
     W = model[get_weights()]
 
     try:
-        Input =
+        Input = W.T @ output
         return Input
     except:
         print(Fore.RED + "ERROR: Unexpected Output or wrong model parameters from: reverse_predict_model_ssd." + Style.RESET_ALL)
@@ -344,19 +345,17 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['linear'], dtype=cp.float32):
     """
 
     from .data_operations_cuda import standard_scaler
-    from .
+    from .activation_functions_cuda import apply_activation
 
-    Input = cp.array(Input, dtype=dtype, copy=False)
-
     Input = standard_scaler(None, Input, scaler_params)
 
+    Input = cp.array(Input, dtype=dtype, copy=False)
+    Input = Input.ravel()
+
     try:
 
-
-        neural_layer =
-        neural_layer = neural_layer.ravel()
-
-        neural_layer = feed_forward(neural_layer, cp.copy(W), is_training=False, Class='?', activation_potentiation=activation_potentiation)
+        Input = apply_activation(Input, activation_potentiation)
+        neural_layer = Input @ W.T
 
         return neural_layer
 
@@ -384,7 +383,7 @@ def reverse_predict_model_ram(output, W, dtype=cp.float32):
     output = cp.array(output, dtype=dtype, copy=False)
 
     try:
-        Input =
+        Input = W.T @ output
         return Input
 
     except:
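The reverse_predict_* functions approximately invert the forward map: where prediction computes Input @ W.T, reverse prediction computes W.T @ output, projecting a class-space vector back into feature space (since W.T @ W is generally not the identity, this is a projection, not an exact inverse). The CUDA variants are identical with cp in place of np. A small sketch, again assuming W of shape (classes, features):

import numpy as np

W = np.array([[1.0, 0.0, 2.0],
              [0.0, 3.0, 1.0]])  # hypothetical weights: 2 classes, 3 features

features = np.array([0.5, 1.0, 0.25])
output = features @ W.T      # forward: feature space -> class space
recovered = W.T @ output     # reverse: class space -> feature space
print(output, recovered)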
pyerualjetwork/parallel.py
ADDED
@@ -0,0 +1,118 @@
+import multiprocessing as mp
+from functools import partial
+import random
+from .planeat import mutation, cross_over, second_parent_selection
+
+def run_process(policy, best_weight, best_activations, good_weights,
+                good_activations, bad_weights, bad_activations, best_fitness,
+                normalized_fitness, child_W, child_act, mutated_W, mutated_act,
+                cross_over_mode, activation_selection_add_prob,
+                activation_selection_change_prob, activation_selection_threshold,
+                bad_genomes_selection_prob, fitness_bias, epsilon,
+                bad_genomes_mutation_prob, activation_mutate_prob,
+                activation_mutate_add_prob, activation_mutate_delete_prob,
+                activation_mutate_change_prob, weight_mutate_prob,
+                weight_mutate_threshold, activation_mutate_threshold):
+
+    process_func = partial(process_single, policy=policy, best_weight=best_weight,
+                           best_activations=best_activations, good_weights=good_weights,
+                           good_activations=good_activations, bad_weights=bad_weights,
+                           bad_activations=bad_activations, best_fitness=best_fitness,
+                           normalized_fitness=normalized_fitness, child_W=child_W,
+                           child_act=child_act, mutated_W=mutated_W, mutated_act=mutated_act,
+                           cross_over_mode=cross_over_mode,
+                           activation_selection_add_prob=activation_selection_add_prob,
+                           activation_selection_change_prob=activation_selection_change_prob,
+                           activation_selection_threshold=activation_selection_threshold,
+                           bad_genomes_selection_prob=bad_genomes_selection_prob,
+                           fitness_bias=fitness_bias,
+                           epsilon=epsilon,
+                           bad_genomes_mutation_prob=bad_genomes_mutation_prob,
+                           activation_mutate_prob=activation_mutate_prob,
+                           activation_mutate_add_prob=activation_mutate_add_prob,
+                           activation_mutate_delete_prob=activation_mutate_delete_prob,
+                           activation_mutate_change_prob=activation_mutate_change_prob,
+                           weight_mutate_prob=weight_mutate_prob,
+                           weight_mutate_threshold=weight_mutate_threshold,
+                           activation_mutate_threshold=activation_mutate_threshold)
+
+    with mp.Pool() as pool:
+        results = pool.map(process_func, range(len(bad_weights)))
+
+    for i, new_child_W, new_child_act, new_mutated_W, new_mutated_act in results:
+        child_W[i] = new_child_W
+        child_act[i] = new_child_act
+        mutated_W[i] = new_mutated_W
+        mutated_act[i] = new_mutated_act
+
+
+    return child_W, child_act, mutated_W, mutated_act
+
+
+def process_single(i, policy, best_weight, best_activations,
+                   good_weights, good_activations,
+                   bad_weights, bad_activations,
+                   best_fitness, normalized_fitness,
+                   cross_over_mode,
+                   activation_selection_add_prob, activation_selection_change_prob,
+                   activation_selection_threshold,
+                   bad_genomes_selection_prob, fitness_bias,
+                   epsilon, bad_genomes_mutation_prob,
+                   activation_mutate_prob, activation_mutate_add_prob,
+                   activation_mutate_delete_prob, activation_mutate_change_prob,
+                   weight_mutate_prob, weight_mutate_threshold,
+                   activation_mutate_threshold):
+
+    if policy == 'aggressive':
+        first_parent_W = best_weight
+        first_parent_act = best_activations
+    elif policy == 'explorer':
+        first_parent_W = good_weights[i]
+        first_parent_act = good_activations[i]
+    else:
+        raise ValueError("policy parameter must be: 'aggressive' or 'explorer'")
+
+    second_parent_W, second_parent_act, s_i = second_parent_selection(
+        good_weights, bad_weights, good_activations, bad_activations, bad_genomes_selection_prob)
+
+    new_child_W, new_child_act = cross_over(
+        first_parent_W,
+        second_parent_W,
+        first_parent_act,
+        second_parent_act,
+        cross_over_mode=cross_over_mode,
+        activation_selection_add_prob=activation_selection_add_prob,
+        activation_selection_change_prob=activation_selection_change_prob,
+        activation_selection_threshold=activation_selection_threshold,
+        bad_genomes_selection_prob=bad_genomes_selection_prob,
+        first_parent_fitness=best_fitness,
+        fitness_bias=fitness_bias,
+        second_parent_fitness=normalized_fitness[s_i],
+        epsilon=epsilon
+    )
+
+    mutation_prob = random.uniform(0, 1)
+    if mutation_prob > bad_genomes_mutation_prob:
+        genome_W = good_weights[i]
+        genome_act = good_activations[i]
+        fitness_index = int(len(bad_weights) / 2 + i)
+    else:
+        genome_W = bad_weights[i]
+        genome_act = bad_activations[i]
+        fitness_index = i
+
+    new_mutated_W, new_mutated_act = mutation(
+        genome_W,
+        genome_act,
+        activation_mutate_prob=activation_mutate_prob,
+        activation_add_prob=activation_mutate_add_prob,
+        activation_delete_prob=activation_mutate_delete_prob,
+        activation_change_prob=activation_mutate_change_prob,
+        weight_mutate_prob=weight_mutate_prob,
+        weight_mutate_threshold=weight_mutate_threshold,
+        genome_fitness=normalized_fitness[fitness_index],
+        activation_mutate_threshold=activation_mutate_threshold,
+        epsilon=epsilon
+    )
+
+    return (i, new_child_W, new_child_act, new_mutated_W, new_mutated_act)
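The new parallel.py follows a common multiprocessing recipe: freeze all shared arguments into the worker with functools.partial, map only the genome index across a process pool, and have each worker return its index alongside its results so the parent can scatter them back into the right slots. A self-contained sketch of that recipe, with a toy square_genome worker standing in for process_single:

import multiprocessing as mp
from functools import partial

def square_genome(i, genomes, scale):
    # Return the index with the result so the parent can write it
    # back to the matching slot, as run_process does above.
    return i, genomes[i] ** 2 * scale

def run(genomes, scale):
    out = [None] * len(genomes)
    # partial freezes the shared arguments; only the index is mapped.
    worker = partial(square_genome, genomes=genomes, scale=scale)
    with mp.Pool() as pool:
        results = pool.map(worker, range(len(genomes)))
    for i, value in results:
        out[i] = value
    return out

if __name__ == '__main__':  # guard required on spawn-based platforms
    print(run([1, 2, 3, 4], scale=10))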