pyerualjetwork 4.3.3__py3-none-any.whl → 4.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.3.3"
+ __version__ = "4.3.4"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
pyerualjetwork/activation_functions.py CHANGED
@@ -217,127 +217,74 @@ def scaled_cubic(x, alpha=1.0):

  def sine_offset(x, beta=0.0):
      return np.sin(x + beta)
-
-
- def safe_add(current_sum, new_value):
-     try:
-         return current_sum + new_value
-     except OverflowError:
-         return np.array(current_sum) + np.array(new_value)


  def apply_activation(Input, activation_list):
      """
-     Applies a sequence of activation functions to the input.
-
-     Args:
-         Input (numpy.ndarray): The input to apply activations to.
-         activation_list (list): A list of activation function names to apply.
-
-     Returns:
-         numpy.ndarray: The input after all activations have been applied.
-     """
+     Applies activation functions for inputs

+     Args:
+         Input (numpy.ndarray):
+         activation_list (list):
+     """
      origin_input = np.copy(Input)
+
+     activation_functions = {
+         'sigmoid': Sigmoid,
+         'swish': swish,
+         'mod_circular': modular_circular_activation,
+         'tanh_circular': tanh_circular_activation,
+         'leaky_relu': leaky_relu,
+         'relu': Relu,
+         'softplus': softplus,
+         'elu': elu,
+         'gelu': gelu,
+         'selu': selu,
+         'tanh': tanh,
+         'sinakt': sinakt,
+         'p_squared': p_squared,
+         'sglu': lambda x: sglu(x, alpha=1.0),
+         'dlrelu': dlrelu,
+         'exsig': exsig,
+         'sin_plus': sin_plus,
+         'acos': lambda x: acos(x, alpha=1.0, beta=0.0),
+         'gla': lambda x: gla(x, alpha=1.0, mu=0.0),
+         'srelu': srelu,
+         'qelu': qelu,
+         'isra': isra,
+         'waveakt': waveakt,
+         'arctan': arctan,
+         'bent_identity': bent_identity,
+         'sech': sech,
+         'softsign': softsign,
+         'pwl': pwl,
+         'cubic': cubic,
+         'gaussian': gaussian,
+         'sine': sine,
+         'tanh_square': tanh_square,
+         'mod_sigmoid': mod_sigmoid,
+         'linear': lambda x: x,
+         'quartic': quartic,
+         'square_quartic': square_quartic,
+         'cubic_quadratic': cubic_quadratic,
+         'exp_cubic': exp_cubic,
+         'sine_square': sine_square,
+         'logarithmic': logarithmic,
+         'scaled_cubic': lambda x: scaled_cubic(x, 1.0),
+         'sine_offset': lambda x: sine_offset(x, 1.0),
+         'spiral': spiral_activation,
+         'circular': circular_activation
+     }
+
+     try:
+
+         valid_mask = np.array([act in activation_functions for act in activation_list])
+         valid_activations = np.array(activation_list)[valid_mask]

-     for i in range(len(activation_list)):
-         try:
-             if activation_list[i] == 'sigmoid':
-                 Input = safe_add(Input, Sigmoid(origin_input))
-             elif activation_list[i] == 'swish':
-                 Input = safe_add(Input, swish(origin_input))
-             elif activation_list[i] == 'mod_circular':
-                 Input = safe_add(Input, modular_circular_activation(origin_input))
-             elif activation_list[i] == 'tanh_circular':
-                 Input = safe_add(Input, tanh_circular_activation(origin_input))
-             elif activation_list[i] == 'leaky_relu':
-                 Input = safe_add(Input, leaky_relu(origin_input))
-             elif activation_list[i] == 'relu':
-                 Input = safe_add(Input, Relu(origin_input))
-             elif activation_list[i] == 'softplus':
-                 Input = safe_add(Input, softplus(origin_input))
-             elif activation_list[i] == 'elu':
-                 Input = safe_add(Input, elu(origin_input))
-             elif activation_list[i] == 'gelu':
-                 Input = safe_add(Input, gelu(origin_input))
-             elif activation_list[i] == 'selu':
-                 Input = safe_add(Input, selu(origin_input))
-             elif activation_list[i] == 'tanh':
-                 Input = safe_add(Input, tanh(origin_input))
-             elif activation_list[i] == 'sinakt':
-                 Input = safe_add(Input, sinakt(origin_input))
-             elif activation_list[i] == 'p_squared':
-                 Input = safe_add(Input, p_squared(origin_input))
-             elif activation_list[i] == 'sglu':
-                 Input = safe_add(Input, sglu(origin_input, alpha=1.0))
-             elif activation_list[i] == 'dlrelu':
-                 Input = safe_add(Input, dlrelu(origin_input))
-             elif activation_list[i] == 'exsig':
-                 Input = safe_add(Input, exsig(origin_input))
-             elif activation_list[i] == 'sin_plus':
-                 Input = safe_add(Input, sin_plus(origin_input))
-             elif activation_list[i] == 'acos':
-                 Input = safe_add(Input, acos(origin_input, alpha=1.0, beta=0.0))
-             elif activation_list[i] == 'gla':
-                 Input = safe_add(Input, gla(origin_input, alpha=1.0, mu=0.0))
-             elif activation_list[i] == 'srelu':
-                 Input = safe_add(Input, srelu(origin_input))
-             elif activation_list[i] == 'qelu':
-                 Input = safe_add(Input, qelu(origin_input))
-             elif activation_list[i] == 'isra':
-                 Input = safe_add(Input, isra(origin_input))
-             elif activation_list[i] == 'waveakt':
-                 Input = safe_add(Input, waveakt(origin_input))
-             elif activation_list[i] == 'arctan':
-                 Input = safe_add(Input, arctan(origin_input))
-             elif activation_list[i] == 'bent_identity':
-                 Input = safe_add(Input, bent_identity(origin_input))
-             elif activation_list[i] == 'sech':
-                 Input = safe_add(Input, sech(origin_input))
-             elif activation_list[i] == 'softsign':
-                 Input = safe_add(Input, softsign(origin_input))
-             elif activation_list[i] == 'pwl':
-                 Input = safe_add(Input, pwl(origin_input))
-             elif activation_list[i] == 'cubic':
-                 Input = safe_add(Input, cubic(origin_input))
-             elif activation_list[i] == 'gaussian':
-                 Input = safe_add(Input, gaussian(origin_input))
-             elif activation_list[i] == 'sine':
-                 Input = safe_add(Input, sine(origin_input))
-             elif activation_list[i] == 'tanh_square':
-                 Input = safe_add(Input, tanh_square(origin_input))
-             elif activation_list[i] == 'mod_sigmoid':
-                 Input = safe_add(Input, mod_sigmoid(origin_input))
-             elif activation_list[i] == 'linear':
-                 Input = safe_add(Input, origin_input)
-             elif activation_list[i] == 'quartic':
-                 Input = safe_add(Input, quartic(origin_input))
-             elif activation_list[i] == 'square_quartic':
-                 Input = safe_add(Input, square_quartic(origin_input))
-             elif activation_list[i] == 'cubic_quadratic':
-                 Input = safe_add(Input, cubic_quadratic(origin_input))
-             elif activation_list[i] == 'exp_cubic':
-                 Input = safe_add(Input, exp_cubic(origin_input))
-             elif activation_list[i] == 'sine_square':
-                 Input = safe_add(Input, sine_square(origin_input))
-             elif activation_list[i] == 'logarithmic':
-                 Input = safe_add(Input, logarithmic(origin_input))
-             elif activation_list[i] == 'scaled_cubic':
-                 Input = safe_add(Input, scaled_cubic(origin_input, 1.0))
-             elif activation_list[i] == 'sine_offset':
-                 Input = safe_add(Input, sine_offset(origin_input, 1.0))
-             elif activation_list[i] == 'spiral':
-                 Input = safe_add(Input, spiral_activation(origin_input))
-             elif activation_list[i] == 'circular':
-                 Input = safe_add(Input, circular_activation(origin_input))
+         activation_outputs = np.array([activation_functions[act](origin_input) for act in valid_activations])

+         return Input + np.sum(activation_outputs, axis=0)

-         except Exception as e:
-             warnings.warn(f"Error in activation {activation_list[i]}: {str(e)}", RuntimeWarning)
-             if not isinstance(Input, np.ndarray):
-                 Input = np.array(Input)
-             if not isinstance(origin_input, np.ndarray):
-                 origin_input = np.array(origin_input)
-             continue
-
-     return Input
+     except Exception as e:
+         warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
+         return Input
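
The 4.3.4 side of this hunk replaces the per-name if/elif chain (and the deleted safe_add helper) with a single name-to-callable table whose outputs are summed onto the input in one vectorized step. A minimal self-contained sketch of that dispatch pattern, using stand-in activations rather than the library's real ones:

    import numpy as np

    # Stand-in activations (hypothetical; the real table maps ~45 names).
    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def relu(x):
        return np.maximum(0.0, x)

    ACTIVATIONS = {'sigmoid': sigmoid, 'relu': relu, 'linear': lambda x: x}

    def apply_activation(Input, activation_list):
        # Each activation sees the ORIGINAL input; results accumulate onto Input.
        origin_input = np.copy(Input)
        valid = [act for act in activation_list if act in ACTIVATIONS]
        outputs = np.array([ACTIVATIONS[act](origin_input) for act in valid])
        return Input + np.sum(outputs, axis=0)

    x = np.array([-1.0, 0.0, 2.0])
    print(apply_activation(x, ['sigmoid', 'relu', 'unknown']))  # unknown names are skipped

As in the released code, unrecognized names are silently dropped rather than raising, and an empty selection returns Input unchanged, since summing an empty array contributes zero.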
pyerualjetwork/plan.py CHANGED
@@ -249,7 +249,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

      """

-     from .planeat import define_genomes
+     from planeat import define_genomes

      data = 'Train'

pyerualjetwork/plan_cuda.py CHANGED
@@ -48,7 +48,6 @@ bar_format_normal = loading_bars()[0]
  bar_format_learner = loading_bars()[1]

  # BUILD -----
-
  def fit(
      x_train,
      y_train,
@@ -265,7 +264,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

      """

-     from .planeat_cuda import define_genomes
+     from planeat_cuda import define_genomes

      data = 'Train'

@@ -278,13 +277,13 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          x_train = transfer_to_gpu(x_train, dtype=dtype)
          y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

-         from .data_operations_cuda import batcher
+         from data_operations_cuda import batcher

      elif memory == 'cpu':
          x_train = transfer_to_cpu(x_train, dtype=dtype)
          y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

-         from .data_operations import batcher
+         from data_operations import batcher

      else:
          raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
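
Several hunks in this release, including the ones above, switch intra-package imports from the relative form (from .planeat import ...) to the absolute form (from planeat import ...). An absolute import of a sibling module only resolves when the package directory itself is on sys.path, so the two forms are not equivalent for a normally installed package. A runnable sketch with a throwaway package (demo_pkg and its contents are hypothetical names):

    import pathlib, subprocess, sys, tempfile, textwrap

    with tempfile.TemporaryDirectory() as root:
        pkg = pathlib.Path(root) / "demo_pkg"
        pkg.mkdir()
        (pkg / "__init__.py").write_text("")
        (pkg / "planeat.py").write_text("def define_genomes(): return 'ok'\n")
        # Absolute sibling import, mirroring the 4.3.4 change:
        (pkg / "plan.py").write_text("from planeat import define_genomes\n")

        probe = textwrap.dedent(f"""
            import sys
            sys.path.insert(0, {str(root)!r})
            import demo_pkg.plan
        """)
        r = subprocess.run([sys.executable, "-c", probe], capture_output=True, text=True)
        print(r.returncode != 0 and "ModuleNotFoundError" in r.stderr)  # True

With the relative form the probe imports cleanly, which makes this family of changes worth flagging when reviewing the release.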
@@ -336,13 +335,12 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          for j in range(activation_potentiation_len):

              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
-
              if fit_start is True and i == 0:
                  act_pop.append(activation_potentiation[j])
-                 W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                 W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], auto_normalization=auto_normalization, train_bar=False, dtype=dtype, memory=memory)
                  weight_pop.append(W)

-             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
+             model = evaluate(x_train_batch, y_train_batch, weight_pop[j], act_pop[j], loading_bar_status=False, dtype=dtype, memory=memory)
              acc = model[get_acc()]

              if strategy == 'accuracy': target_pop.append(acc)
@@ -562,7 +560,6 @@ def feed_forward(
      neural_layer = cp.dot(w, Input)

      return neural_layer
-

  def evaluate(
      x_test,
pyerualjetwork/planeat.py CHANGED
@@ -20,7 +20,7 @@ import math
  from .plan import feed_forward
  from .data_operations import normalization
  from .ui import loading_bars, initialize_loading_bar
- from .activation_functions import apply_activation, all_activations
+ from. activation_functions import apply_activation, all_activations

  def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
      """
pyerualjetwork/visualizations.py CHANGED
@@ -617,7 +617,7 @@ def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=F

  def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
      """Initializes the visualization setup based on the parameters."""
-     from .data_operations import find_closest_factors
+     from data_operations import find_closest_factors
      visualization_objects = {}

      if show_training or neurons_history:
@@ -704,7 +704,7 @@ def show():

  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
      """Initialize all visualization components"""
-     from .data_operations import find_closest_factors
+     from data_operations import find_closest_factors
      viz_objects = {}

      if show_history:
pyerualjetwork-4.3.3.dist-info/METADATA → pyerualjetwork-4.3.4.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.3
+ Version: 4.3.4
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -23,31 +23,33 @@ PyPi Page: https://pypi.org/project/pyerualjetwork/
  GitHub Page: https://github.com/HCB06/PyerualJetwork


-     pip install pyerualjetwork
-
-     'use if your data small, medium or large:'
+     pip install pyerualjetwork

-     from pyerualjetwork import plan
-     from pyerualjetwork import planeat
-     from pyerualjetwork import data_operations
-     from pyerualjetwork import model_operations
-
-     from pyerualjetwork import plan_cuda
-     from pyerualjetwork import planeat_cuda
-     from pyerualjetwork import data_operations_cuda
-     from pyerualjetwork import model_operations_cuda
-
-     'use if your data huge: _afterburner package (afterburner package comes with powerful paralellism, afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded. Specially designed for LLM training and other massive model training)'
-
-     from pyerualjetwork_afterburner import plan
-     from pyerualjetwork_afterburner import planeat
-     from pyerualjetwork_afterburner import data_operations
-     from pyerualjetwork_afterburner import model_operations
-
-     from pyerualjetwork_afterburner import plan_cuda
-     from pyerualjetwork_afterburner import planeat_cuda
-     from pyerualjetwork_afterburner import data_operations_cuda
-     from pyerualjetwork_afterburner import model_operations_cuda
+     'use this if your data small or memory management is a problem :'
+
+     from pyerualjetwork import plan
+     from pyerualjetwork import planeat
+     from pyerualjetwork import data_operations
+     from pyerualjetwork import model_operations
+
+     from pyerualjetwork import plan_cuda
+     from pyerualjetwork import planeat_cuda
+     from pyerualjetwork import data_operations_cuda
+     from pyerualjetwork import model_operations_cuda
+
+     'use this if your data large or memory management is not a problem : _afterburner package (afterburner package comes with powerful paralellism,
+     afterburner with cuda modules offers super-fast training but some memory managemant features and visualization features discarded.
+     Specially designed for LLM training and other massive model training)'
+
+     from pyerualjetwork_afterburner import plan
+     from pyerualjetwork_afterburner import planeat
+     from pyerualjetwork_afterburner import data_operations
+     from pyerualjetwork_afterburner import model_operations
+
+     from pyerualjetwork_afterburner import plan_cuda
+     from pyerualjetwork_afterburner import planeat_cuda
+     from pyerualjetwork_afterburner import data_operations_cuda
+     from pyerualjetwork_afterburner import model_operations_cuda

  Optimized for Visual Studio Code

pyerualjetwork-4.3.3.dist-info/RECORD → pyerualjetwork-4.3.4.dist-info/RECORD RENAMED
@@ -1,5 +1,5 @@
- pyerualjetwork/__init__.py,sha256=aKQcQmw61Ctm9v2v1JO5ZYp4F3k7s9KkeId4a_B31ac,639
- pyerualjetwork/activation_functions.py,sha256=eLEesmMgDvkI1TqaLTpqtOgTaLbHEAyw-D57KIKd9G4,11775
+ pyerualjetwork/__init__.py,sha256=UZL2J9uCXFaBgNKiBN79R0oWCaQxDkK6SWour_AgCWQ,639
+ pyerualjetwork/activation_functions.py,sha256=AR91fQV2W2rc-Qb4Yp7b8ucYpGjwyQUewO-M-lyEMs8,7729
  pyerualjetwork/activation_functions_cuda.py,sha256=ztIw6rMR4t1289_TPIGYwE6qarl_YbSOGj5Ep3rUMqs,11803
  pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
  pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
@@ -11,18 +11,18 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=RKqnh7-MByFosxqme4q4jC1lOndX26O-OVXYV6ZxoEE,12965
  pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
- pyerualjetwork/plan.py,sha256=ApMQC46_I8qtMqO4lLYLme--SGcMRg-GRo1-gSb3A3I,31894
- pyerualjetwork/plan_cuda.py,sha256=H_EuNNyxrY6-AiuRkOYC8J_UmbzoqJ9aeO0i9pgUDZI,33277
- pyerualjetwork/planeat.py,sha256=e-J-u5gJYijKznN6gn2DZoaCJJro84DOBYTy1rR5-y4,39470
+ pyerualjetwork/plan.py,sha256=9dYxGCIEyQoUyo1s8X6RHshFNKekXteQqtQ2lm_Khb8,31893
+ pyerualjetwork/plan_cuda.py,sha256=jraw3-QqweiptwZOhRQxApsAqkVWMf4pmdeMWt0v7Sc,33253
+ pyerualjetwork/planeat.py,sha256=uRX-hDywGOai6hHhbYrmcRodNZOg4WCQeJWZbdMlZs8,39470
  pyerualjetwork/planeat_cuda.py,sha256=QNHCQLkR0MNFqyN2iHAtC7cbf8qZiD3p_54YH3lnMFA,39529
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
- pyerualjetwork/visualizations.py,sha256=1SKMZaJ80OD2qHUyMxW1IOv8zwmxzMPxclfbeq1Xr4g,28772
+ pyerualjetwork/visualizations.py,sha256=VL00sX2DZz83F__PyEJH9s1LizuXpOBzWjnoSjMJIJ0,28770
  pyerualjetwork/visualizations_cuda.py,sha256=KbMhfsLlxujy_i3QrwCf734Q-k6d7Zn_7CEbm3gzK9w,29186
- pyerualjetwork_afterburner/__init__.py,sha256=PWlgYDHv0-7II5khz9y5meQi0PdWYwsuQ7-pEcCijqM,655
- pyerualjetwork_afterburner/activation_functions.py,sha256=2bv7o4EPEFr8cSKq7KI04HhMUyxgBpe8soGvN98Mazg,7740
- pyerualjetwork_afterburner/activation_functions_cuda.py,sha256=Ua606lsj9LQahfLi6oZMkSyzyPT7ySrvC6qfACNCbL8,7781
+ pyerualjetwork_afterburner/__init__.py,sha256=XfhHAEboCZ5NGlC1bMkuPdWJHvTyrQ-bZGkZUd0qi90,651
+ pyerualjetwork_afterburner/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
+ pyerualjetwork_afterburner/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
  pyerualjetwork_afterburner/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
- pyerualjetwork_afterburner/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
+ pyerualjetwork_afterburner/data_operations_cuda.py,sha256=nn8fXBRZrdJWqcXlQqebaBMXkR3LyGfUwlzQ-26R8yo,17624
  pyerualjetwork_afterburner/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
  pyerualjetwork_afterburner/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork_afterburner/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
@@ -32,13 +32,13 @@ pyerualjetwork_afterburner/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2
  pyerualjetwork_afterburner/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
  pyerualjetwork_afterburner/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
  pyerualjetwork_afterburner/plan.py,sha256=Gxv8ii4brTYMzzFZBP-X6kkwc6w6vtTPiMmqVOAqoq8,21972
- pyerualjetwork_afterburner/plan_cuda.py,sha256=usyL-rWfczko8MQ-tmgMyt7UrKoH7IG3FX3edBiq-vc,22716
+ pyerualjetwork_afterburner/plan_cuda.py,sha256=7R9alPqSOXpL_3vmVbQJFUVIOj5SjltIo0necj76AjE,22751
  pyerualjetwork_afterburner/planeat.py,sha256=Lq5R0aMS4UIdZdbUKsKDv5g0WLwYryomR3IQYb8vAa4,37573
- pyerualjetwork_afterburner/planeat_cuda.py,sha256=dZdKrrhdnoTjcF8Uv23Y4UvlOfizazNyx9v6QsdpIoo,37621
+ pyerualjetwork_afterburner/planeat_cuda.py,sha256=KnU54osvwrMVvdO4fww7BqFBoq0I8c2YcZOVHD8l69g,37494
  pyerualjetwork_afterburner/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
  pyerualjetwork_afterburner/visualizations.py,sha256=1SKMZaJ80OD2qHUyMxW1IOv8zwmxzMPxclfbeq1Xr4g,28772
  pyerualjetwork_afterburner/visualizations_cuda.py,sha256=KbMhfsLlxujy_i3QrwCf734Q-k6d7Zn_7CEbm3gzK9w,29186
- pyerualjetwork-4.3.3.dist-info/METADATA,sha256=hfWfhq4gsongGVfzIxIzRuSzE44rsnHDUmVSbD4pXlo,8304
- pyerualjetwork-4.3.3.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.3.3.dist-info/top_level.txt,sha256=uK64ge08QQoPuXM3aiRVPgiQQtl8Fxm2-HieIut5Lwo,42
- pyerualjetwork-4.3.3.dist-info/RECORD,,
+ pyerualjetwork-4.3.4.dist-info/METADATA,sha256=0NPm5VKPBK7A6Pc5VTWF7oZOdBC1q_-vFcMpsFkAc74,8248
+ pyerualjetwork-4.3.4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.3.4.dist-info/top_level.txt,sha256=uK64ge08QQoPuXM3aiRVPgiQQtl8Fxm2-HieIut5Lwo,42
+ pyerualjetwork-4.3.4.dist-info/RECORD,,
pyerualjetwork_afterburner/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.3.2.2b0-afterburner"
+ __version__ = "4.3.4-afterburner"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
pyerualjetwork_afterburner/activation_functions.py CHANGED
@@ -277,15 +277,14 @@ def apply_activation(Input, activation_list):
      }

      try:
-         valid_activations = [act for act in activation_list if act in activation_functions]
-
-         activation_outputs = np.stack([activation_functions[act](origin_input)
-                                        for act in valid_activations])

-         result = Input + np.sum(activation_outputs, axis=0)
-
-         return result
+         valid_mask = np.array([act in activation_functions for act in activation_list])
+         valid_activations = np.array(activation_list)[valid_mask]
+
+         activation_outputs = np.array([activation_functions[act](origin_input) for act in valid_activations])
+
+         return Input + np.sum(activation_outputs, axis=0)

      except Exception as e:
          warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
-         return Input
+         return Input
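
The afterburner variant above makes the same switch from list filtering to mask filtering and also swaps np.stack for np.array. For a list of same-shape activation outputs the two build identical arrays, so the functional content of the change is the boolean-mask selection, not the stacking. A quick check under that assumption:

    import numpy as np

    outs = [np.ones(3), np.arange(3.0)]  # same-shape outputs, as apply_activation produces
    assert np.array_equal(np.stack(outs), np.array(outs))

    # The new selection style: a boolean mask over the requested names.
    table = {'sigmoid': None, 'relu': None}
    names = ['sigmoid', 'unknown', 'relu']
    mask = np.array([n in table for n in names])
    print(np.array(names)[mask])  # ['sigmoid' 'relu']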
pyerualjetwork_afterburner/activation_functions_cuda.py CHANGED
@@ -276,14 +276,13 @@ def apply_activation(Input, activation_list):
      }

      try:
-         valid_activations = [act for act in activation_list if act in activation_functions]

-         activation_outputs = cp.stack([activation_functions[act](origin_input)
-                                        for act in valid_activations])
+         valid_mask = cp.array([act in activation_functions for act in activation_list])
+         valid_activations = cp.array(activation_list)[valid_mask]

-         result = Input + cp.sum(activation_outputs, axis=0)
-
-         return result
+         activation_outputs = cp.array([activation_functions[act](origin_input) for act in valid_activations])
+
+         return Input + cp.sum(activation_outputs, axis=0)

      except Exception as e:
          warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
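
One caveat on the CUDA variant of this change: if cp here is CuPy, there is no string dtype, so cp.array(activation_list) over a list of activation names cannot be built the way the NumPy version is; in the code above that failure would presumably be caught by the except branch, which warns and returns Input unchanged. A sketch of the difference (the CuPy line is left commented out because it needs a CUDA device):

    import numpy as np
    # import cupy as cp

    names = ['sigmoid', 'relu']
    print(np.array(names).dtype)  # <U7: NumPy supports fixed-width unicode arrays
    # cp.array(names)             # CuPy raises: only numeric/bool dtypes are supported

    # Backend-agnostic filtering that avoids the issue entirely:
    table = {'sigmoid': None, 'relu': None}
    valid = [n for n in names if n in table]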
pyerualjetwork_afterburner/data_operations_cuda.py CHANGED
@@ -17,7 +17,7 @@ def encode_one_hot(y_train, y_test=None, summary=False):
      tuple: One-hot encoded y_train and (if given) y_test.
      """

-     from .memory_operations import optimize_labels, transfer_to_cpu
+     from memory_operations import optimize_labels, transfer_to_cpu

      y_train = optimize_labels(y_train, one_hot_encoded=False, cuda=True)
      y_test = optimize_labels(y_test, one_hot_encoded=False, cuda=True)
@@ -401,7 +401,7 @@ def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=cp.floa

      return scaled_data # sample data scaled

-
+
  def normalization(
      Input, # num: Input data to be normalized.
      dtype=cp.float32
pyerualjetwork_afterburner/plan_cuda.py CHANGED
@@ -23,7 +23,7 @@ from .data_operations_cuda import normalization
  from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
  from .activation_functions_cuda import apply_activation, all_activations
  from .metrics_cuda import metrics
- from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
+ from model_operations_cuda import get_acc, get_preds, get_preds_softmax
  from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
  from .visualizations_cuda import (
      draw_neural_web,
@@ -151,11 +151,12 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

      """

-     from .planeat_cuda import define_genomes
+     from planeat_cuda import define_genomes

      data = 'Train'

-     activation_potentiation = all_activations()
+     activation_potentiation = all_activations()
+
      activation_potentiation_len = len(activation_potentiation)

      y_train = optimize_labels(y_train, cuda=True)
@@ -164,13 +165,13 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
          x_train = transfer_to_gpu(x_train, dtype=dtype)
          y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

-         from .data_operations_cuda import batcher
+         from data_operations_cuda import batcher

      elif memory == 'cpu':
          x_train = transfer_to_cpu(x_train, dtype=dtype)
          y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

-         from .data_operations import batcher
+         from data_operations import batcher

      else:
          raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
@@ -228,7 +229,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
              W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], dtype=dtype)
              weight_pop.append(W)

-             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j])
+             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
              acc = model[get_acc()]

              if strategy == 'accuracy': target_pop.append(acc)
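
The last change in the hunk above adds activation_potentiation=act_pop[j] to the evaluate call. A candidate's weights are only meaningful under the activations it was fitted with, so scoring it under evaluate's default activation measures a different model. A toy illustration of that pitfall (the signature and default below are hypothetical, not the library's):

    import numpy as np

    def evaluate(x, W, activation_potentiation=('linear',)):
        # Stand-in: sum the named activations of the linear response, as apply_activation does.
        acts = {'linear': lambda v: v, 'tanh': np.tanh}
        return sum(acts[a](x @ W) for a in activation_potentiation)

    x, W = np.ones((1, 2)), np.full((2, 1), 2.0)
    print(evaluate(x, W))                                     # default scoring: [[4.]]
    print(evaluate(x, W, activation_potentiation=('tanh',)))  # matched scoring: [[0.9993...]]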
pyerualjetwork_afterburner/planeat_cuda.py CHANGED
@@ -721,9 +721,8 @@ def mutation(weight,
      max_threshold = len(activations)

      new_threshold = threshold
-
-     except_this = ['spiral', 'circular']
-     all_acts = [item for item in all_activations() if item not in except_this] # SPIRAL AND CIRCULAR ACTIVATION DISCARDED
+
+     all_acts = all_activations()

      activation_add_prob = 1 - activation_add_prob
      activation_delete_prob = 1 - activation_delete_prob
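
Dropping the except_this filter means 'spiral' and 'circular' are again eligible when activation mutations sample from the pool. A stub sketch of the before/after pools (all_activations() is stubbed with a short list here):

    def all_activations():  # stand-in for the real registry
        return ['sigmoid', 'relu', 'tanh', 'spiral', 'circular']

    except_this = ['spiral', 'circular']
    old_pool = [a for a in all_activations() if a not in except_this]  # 4.3.3 behaviour
    new_pool = all_activations()                                       # 4.3.4 behaviour

    print(sorted(set(new_pool) - set(old_pool)))  # ['circular', 'spiral'] now drawable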