pyerualjetwork-5.1-py3-none-any.whl → pyerualjetwork-5.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. pyerualjetwork/__init__.py +15 -14
  2. pyerualjetwork/cpu/__init__.py +24 -0
  3. pyerualjetwork/{activation_functions_cpu.py → cpu/activation_functions.py} +40 -4
  4. pyerualjetwork/{data_operations_cpu.py → cpu/data_ops.py} +17 -19
  5. pyerualjetwork/{metrics_cpu.py → cpu/metrics.py} +3 -1
  6. pyerualjetwork/{visualizations_cpu.py → cpu/visualizations.py} +96 -139
  7. pyerualjetwork/cuda/__init__.py +24 -0
  8. pyerualjetwork/{activation_functions_cuda.py → cuda/activation_functions.py} +54 -5
  9. pyerualjetwork/{data_operations_cuda.py → cuda/data_ops.py} +16 -16
  10. pyerualjetwork/{metrics_cuda.py → cuda/metrics.py} +1 -1
  11. pyerualjetwork/{visualizations_cuda.py → cuda/visualizations.py} +8 -244
  12. pyerualjetwork/{ene_cpu.py → ene.py} +29 -95
  13. pyerualjetwork/fitness_functions.py +0 -1
  14. pyerualjetwork/help.py +5 -5
  15. pyerualjetwork/issue_solver.py +39 -11
  16. pyerualjetwork/{memory_operations.py → memory_ops.py} +1 -1
  17. pyerualjetwork/model_ops.py +734 -0
  18. pyerualjetwork/{neu_cpu.py → nn.py} +199 -91
  19. pyerualjetwork/{model_operations_cpu.py → old_cpu_model_ops.py} +62 -59
  20. pyerualjetwork/{model_operations_cuda.py → old_cuda_model_ops.py} +99 -86
  21. {pyerualjetwork-5.1.dist-info → pyerualjetwork-5.5.dist-info}/METADATA +16 -18
  22. pyerualjetwork-5.5.dist-info/RECORD +27 -0
  23. pyerualjetwork/ene_cuda.py +0 -962
  24. pyerualjetwork/neu_cuda.py +0 -588
  25. pyerualjetwork-5.1.dist-info/RECORD +0 -26
  26. pyerualjetwork/{loss_functions_cpu.py → cpu/loss_functions.py} +0 -0
  27. pyerualjetwork/{loss_functions_cuda.py → cuda/loss_functions.py} +0 -0
  28. {pyerualjetwork-5.1.dist-info → pyerualjetwork-5.5.dist-info}/WHEEL +0 -0
  29. {pyerualjetwork-5.1.dist-info → pyerualjetwork-5.5.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,7 @@
  import networkx as nx
  import matplotlib.pyplot as plt
  import numpy as np
+ import cupy as cp
  from scipy.spatial import ConvexHull
  import seaborn as sns
  from matplotlib.animation import ArtistAnimation
@@ -86,7 +87,7 @@ def draw_model_architecture(model_name, model_path=''):
      Visualizes the architecture of a neural network model with multiple inputs based on activation functions.
      """

-     from .model_operations_cpu import load_model, get_scaler, get_act, get_weights
+     from ..model_ops import load_model, get_scaler, get_act, get_weights

      model = load_model(model_name=model_name, model_path=model_path)

@@ -182,14 +183,11 @@ def draw_model_architecture(model_name, model_path=''):

  def draw_activations(x_train, activation):

-     from . import activation_functions_cpu as af
+     from . import activation_functions as af

      if activation == 'sigmoid':
          result = af.Sigmoid(x_train)

-     elif activation == 'swish':
-         result = af.swish(x_train)
-
      elif activation == 'circular':
          result = af.circular_activation(x_train)

@@ -205,18 +203,9 @@ def draw_activations(x_train, activation):
      elif activation == 'relu':
          result = af.Relu(x_train)

-     elif activation == 'softplus':
-         result = af.softplus(x_train)
-
-     elif activation == 'elu':
-         result = af.elu(x_train)
-
      elif activation == 'gelu':
          result = af.gelu(x_train)

-     elif activation == 'selu':
-         result = af.selu(x_train)
-
      elif activation == 'softmax':
          result = af.Softmax(x_train)

@@ -235,24 +224,12 @@ def draw_activations(x_train, activation):
      elif activation == 'dlrelu':
          result = af.dlrelu(x_train)

-     elif activation == 'exsig':
-         result = af.exsig(x_train)
-
      elif activation == 'sin_plus':
          result = af.sin_plus(x_train)

      elif activation == 'acos':
          result = af.acos(x_train, alpha=1.0, beta=0.0)

-     elif activation == 'gla':
-         result = af.gla(x_train, alpha=1.0, mu=0.0)
-
-     elif activation == 'srelu':
-         result = af.srelu(x_train)
-
-     elif activation == 'qelu':
-         result = af.qelu(x_train)
-
      elif activation == 'isra':
          result = af.isra(x_train)

@@ -265,54 +242,27 @@ def draw_activations(x_train, activation):
      elif activation == 'bent_identity':
          result = af.bent_identity(x_train)

-     elif activation == 'sech':
-         result = af.sech(x_train)
-
      elif activation == 'softsign':
          result = af.softsign(x_train)

      elif activation == 'pwl':
          result = af.pwl(x_train)

-     elif activation == 'cubic':
-         result = af.cubic(x_train)
-
-     elif activation == 'gaussian':
-         result = af.gaussian(x_train)
-
      elif activation == 'sine':
          result = af.sine(x_train)

      elif activation == 'tanh_square':
          result = af.tanh_square(x_train)

-     elif activation == 'mod_sigmoid':
-         result = af.mod_sigmoid(x_train)
-
      elif activation == 'linear':
          result = x_train

-     elif activation == 'quartic':
-         result = af.quartic(x_train)
-
-     elif activation == 'square_quartic':
-         result = af.square_quartic(x_train)
-
-     elif activation == 'cubic_quadratic':
-         result = af.cubic_quadratic(x_train)
-
-     #elif activation == 'exp_cubic':
-     #    result = af.exp_cubic(x_train)
-
      elif activation == 'sine_square':
          result = af.sine_square(x_train)

      elif activation == 'logarithmic':
          result = af.logarithmic(x_train)

-     elif activation == 'scaled_cubic':
-         result = af.scaled_cubic(x_train, 1.0)
-
      elif activation == 'sine_offset':
          result = af.sine_offset(x_train, 1.0)

@@ -325,35 +275,37 @@ def draw_activations(x_train, activation):
      return x_train


- def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
+ def plot_evaluate(x_test, y_test, y_preds, model, acc, cuda=False):

-     from .metrics_cpu import metrics, confusion_matrix, roc_curve
-     from .ui import loading_bars, initialize_loading_bar
-     from .data_operations_cpu import decode_one_hot
-     from .model_operations_cpu import predict_model_ram
+     if not cuda:
+         from .metrics import metrics, confusion_matrix, roc_curve
+         from .data_ops import decode_one_hot
+     else:
+         from ..cuda.metrics import metrics, confusion_matrix, roc_curve
+         from ..cuda.data_ops import decode_one_hot

-     bar_format_normal = loading_bars()[0]

-     acc = acc_list[len(acc_list) - 1]
-     y_true = decode_one_hot(y_test)
+     from ..model_ops import predict_from_memory

-     y_true = np.array(y_true)
-     y_preds = np.array(y_preds)
+     y_true = decode_one_hot(y_test)
+     y_preds = decode_one_hot(y_preds)
+
      Class = np.unique(decode_one_hot(y_test))

      precision, recall, f1 = metrics(y_test, y_preds)

-
      cm = confusion_matrix(y_true, y_preds, len(Class))
      fig, axs = plt.subplots(2, 2, figsize=(16, 12))
+     fig.suptitle("Evaluation Report")

-     sns.heatmap(cm, annot=True, fmt='d', ax=axs[0, 0])
+     sns.heatmap(cm.get() if cuda else cm, annot=True, fmt='d', ax=axs[0, 0])
      axs[0, 0].set_title("Confusion Matrix")
      axs[0, 0].set_xlabel("Predicted Class")
      axs[0, 0].set_ylabel("Actual Class")

      if len(Class) == 2:
          fpr, tpr, thresholds = roc_curve(y_true, y_preds)
+         if cuda: fpr, tpr = fpr.get(), tpr.get()

          roc_auc = np.trapz(tpr, fpr)
          axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
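
The cuda-aware branches above lean on CuPy's host-transfer API: a cupy.ndarray exposes .get(), which copies device data into a fresh numpy.ndarray, and seaborn/matplotlib only accept host arrays. A minimal sketch of that rule, assuming only that cupy may or may not be installed (the values are illustrative):

import numpy as np

try:
    import cupy as cp
    gpu_scores = cp.asarray([0.1, 0.7, 0.2])   # lives in GPU VRAM
    host_scores = gpu_scores.get()             # cupy.ndarray -> numpy.ndarray copy
except ImportError:
    host_scores = np.asarray([0.1, 0.7, 0.2])  # CPU fallback when CUDA is unavailable

# Plotting calls expect host arrays, hence the `cm.get() if cuda else cm`
# and `fpr.get(), tpr.get()` guards in the hunks above.
print(type(host_scores))  # <class 'numpy.ndarray'>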
@@ -380,7 +332,8 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):


          fpr, tpr, thresholds = roc_curve(y_true_copy, y_preds_copy)
-
+         if cuda: fpr, tpr = fpr.get(), tpr.get()
+
          roc_auc = np.trapz(tpr, fpr)
          axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
          axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
@@ -394,7 +347,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):


      metric = ['Precision', 'Recall', 'F1 Score', 'Accuracy']
-     values = [precision, recall, f1, acc]
+     values = [precision, recall, f1, acc if not cuda else acc.get()]
      colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']


@@ -405,7 +358,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
      axs[0, 1].text(bar.get_x() + bar.get_width() / 2, bar.get_height() - 0.05, f'{value:.2f}',
                     ha='center', va='bottom', fontsize=12, color='white', weight='bold')

-     axs[0, 1].set_ylim(0, 1)
+     axs[0, 1].set_ylim(0, 1)
      axs[0, 1].set_xlabel('Metrics')
      axs[0, 1].set_ylabel('Score')
      axs[0, 1].set_title('Precision, Recall, F1 Score, and Accuracy (Weighted)')
@@ -414,107 +367,99 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activations):
      feature_indices=[0, 1]

      h = .02
+
+     array_type = np if not cuda else cp
      x_min, x_max = x_test[:, feature_indices[0]].min() - 1, x_test[:, feature_indices[0]].max() + 1
      y_min, y_max = x_test[:, feature_indices[1]].min() - 1, x_test[:, feature_indices[1]].max() + 1
-     xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
-                          np.arange(y_min, y_max, h))
-
-     grid = np.c_[xx.ravel(), yy.ravel()]
+     xx, yy = array_type.meshgrid(array_type.arange(x_min, x_max, h),
+                                  array_type.arange(y_min, y_max, h))

-     try:

-         grid_full = np.zeros((grid.shape[0], x_test.shape[1]))
-         grid_full[:, feature_indices] = grid
-
-         Z = [None] * len(grid_full)
-
-         predict_progress = initialize_loading_bar(total=len(grid_full), leave=False,
-                                                   bar_format=bar_format_normal, desc="Predicts For Decision Boundary", ncols=65)
+     grid = array_type.c_[xx.ravel(), yy.ravel()]
+     grid_full = array_type.zeros((grid.shape[0], x_test.shape[1]), dtype=array_type.float32)
+     grid_full[:, feature_indices] = grid

-         for i in range(len(grid_full)):
+     Z = array_type.argmax(predict_from_memory(grid_full, model=model, cuda=cuda), axis=1)

-             Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activations=activations))
-             predict_progress.update(1)
+     Z = Z.reshape(xx.shape)

-         predict_progress.close()
+     if cuda:
+         xx = xx.get()
+         yy = yy.get()
+         Z = Z.get()
+         x_test = x_test.get()
+         y_test_decoded = decode_one_hot(y_test).get()

-         Z = np.array(Z)
-         Z = Z.reshape(xx.shape)
+     else:
+         y_test_decoded = decode_one_hot(y_test)

-         axs[1,1].contourf(xx, yy, Z, alpha=0.8)
-         axs[1,1].scatter(x_test[:, feature_indices[0]], x_test[:, feature_indices[1]], c=decode_one_hot(y_test), edgecolors='k', marker='o', s=20, alpha=0.9)
-         axs[1,1].set_xlabel(f'Feature {0 + 1}')
-         axs[1,1].set_ylabel(f'Feature {1 + 1}')
-         axs[1,1].set_title('Decision Boundary')
+     axs[1,1].contourf(xx, yy, Z, alpha=0.8)
+     axs[1,1].scatter(x_test[:, feature_indices[0]], x_test[:, feature_indices[1]], c=y_test_decoded, edgecolors='k', marker='o', s=20, alpha=0.9)
+     axs[1,1].set_xlabel(f'Feature {0 + 1}')
+     axs[1,1].set_ylabel(f'Feature {1 + 1}')
+     axs[1,1].set_title('Decision Boundary')

-     except Exception as e:
-         print(f"Error: {e}")

      plt.show()


- def plot_decision_boundary(x, y, activations, W, artist=None, ax=None):
-
-     from .model_operations_cpu import predict_model_ram
-     from .data_operations_cpu import decode_one_hot
+ def plot_decision_boundary(x, y, ax, activations, W, artist, model_type, activation_potentiation=False, cuda=False):

+     from ..model_ops import get_model_template, predict_from_memory
+
+     if not cuda:
+         from .data_ops import decode_one_hot
+
+     else:
+         from ..cuda.data_ops import decode_one_hot
+
      feature_indices = [0, 1]

+     template_model = get_model_template()
+
+     model = template_model._replace(weights=W,
+                                     activations=activations,
+                                     model_type=model_type,
+                                     activation_potentiation=activation_potentiation)
      h = .02
+     array_type = np if not cuda else cp
      x_min, x_max = x[:, feature_indices[0]].min() - 1, x[:, feature_indices[0]].max() + 1
      y_min, y_max = x[:, feature_indices[1]].min() - 1, x[:, feature_indices[1]].max() + 1
-     xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
-                          np.arange(y_min, y_max, h))
-
-     grid = np.c_[xx.ravel(), yy.ravel()]
-     grid_full = np.zeros((grid.shape[0], x.shape[1]), dtype=np.float32)
+     xx, yy = array_type.meshgrid(array_type.arange(x_min, x_max, h),
+                                  array_type.arange(y_min, y_max, h))
+
+     grid = array_type.c_[xx.ravel(), yy.ravel()]
+     grid_full = array_type.zeros((grid.shape[0], x.shape[1]), dtype=array_type.float32)
      grid_full[:, feature_indices] = grid
-
-     Z = [None] * len(grid_full)

-     for i in range(len(grid_full)):
-         Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activations=activations))
+     Z = array_type.argmax(predict_from_memory(grid_full, model=model, cuda=cuda), axis=1)

-     Z = np.array(Z, dtype=np.int32)
      Z = Z.reshape(xx.shape)

-     if ax is None:
-
-         plt.contourf(xx, yy, Z, alpha=0.8)
-         plt.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
-         plt.xlabel(f'Feature {0 + 1}')
-         plt.ylabel(f'Feature {1 + 1}')
-         plt.title('Decision Boundary')
-
-         plt.show()
+     if cuda:
+         xx = xx.get()
+         yy = yy.get()
+         Z = Z.get()
+         x = x.get()
+         y_test_decoded = decode_one_hot(y).get()

      else:
+         y_test_decoded = decode_one_hot(y)

-         try:
-             art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
-             art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
-             ax[1, 0].set_xlabel(f'Feature {0 + 1}')
-             ax[1, 0].set_ylabel(f'Feature {1 + 1}')
-             ax[1, 0].set_title('Decision Boundary')
+     art1_1 = ax.contourf(xx, yy, Z, alpha=0.8)
+     art1_2 = ax.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=y_test_decoded, edgecolors='k', marker='o', s=20, alpha=0.9)
+     ax.set_xlabel(f'Feature {0 + 1}')
+     ax.set_ylabel(f'Feature {1 + 1}')
+     ax.set_title('Decision Boundary')
+     artist.append([*art1_1.collections, art1_2])

-             return art1_1, art1_2
-
-         except:
-
-             art1_1 = ax.contourf(xx, yy, Z, alpha=0.8)
-             art1_2 = ax.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
-             ax.set_xlabel(f'Feature {0 + 1}')
-             ax.set_ylabel(f'Feature {1 + 1}')
-             ax.set_title('Decision Boundary')
-
-
-             return art1_1, art1_2
+     return artist

-
+

  def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):

-     from .metrics_cpu import pca
-     from .data_operations_cpu import decode_one_hot
+     from .metrics import pca
+     from .data_ops import decode_one_hot

      if x.shape[1] > 2:

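Both rewritten functions use the same dispatch idiom: bind array_type to numpy or cupy once, write the grid construction a single time against that alias, and call .get() only at the matplotlib boundary. A standalone sketch of the idiom under those assumptions (make_grid is a hypothetical helper, not part of the package):

import numpy as np

def make_grid(x_min, x_max, y_min, y_max, h=.02, cuda=False):
    # One alias, one code path for CPU and GPU.
    if cuda:
        import cupy as cp
        array_type = cp
    else:
        array_type = np

    xx, yy = array_type.meshgrid(array_type.arange(x_min, x_max, h),
                                 array_type.arange(y_min, y_max, h))
    grid = array_type.c_[xx.ravel(), yy.ravel()]

    # Transfer to host only when a NumPy consumer (e.g. matplotlib) needs the data.
    if cuda:
        xx, yy, grid = xx.get(), yy.get(), grid.get()
    return xx, yy, grid

xx, yy, grid = make_grid(-1.0, 1.0, -1.0, 1.0)  # CPU path
print(grid.shape)  # (N, 2): stacked (x, y) coordinates of the mesh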
@@ -707,7 +652,7 @@ def show():

  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):

-     from .data_operations_cpu import find_closest_factors
+     from .data_ops import find_closest_factors
      viz_objects = {}

      if show_history:
@@ -823,4 +768,16 @@ def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc

      ani5 = ArtistAnimation(web['fig'], web['artists'], interval=interval, blit=True)
      plt.tight_layout()
-     plt.show()
+     plt.show()
+
+
+ def display_decision_boundary_history(fig, artist, interval):
+
+     ani1 = ArtistAnimation(fig, artist, interval=interval, blit=True)
+     plt.tight_layout()
+     plt.show()
+
+
+ def create_decision_boundary_hist():
+     fig, ax = plt.subplots()
+     return fig, ax
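
The two new helpers bracket plot_decision_boundary's artist list: create one figure up front, append one frame of artists per training step, then hand everything to ArtistAnimation. A self-contained sketch of the same flow with stand-in data (the random decision maps replace real per-epoch predictions):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation

fig, ax = plt.subplots()   # what create_decision_boundary_hist() returns
artist = []

for step in range(5):                  # placeholder for a training loop
    Z = np.random.rand(20, 20)         # stand-in for one step's decision map
    frame = ax.contourf(Z, alpha=0.8)
    # Older matplotlib exposes contour artists via .collections (as the diff does);
    # on newer releases the ContourSet itself is a single appendable artist.
    artist.append(getattr(frame, "collections", [frame]))

# what display_decision_boundary_history(fig, artist, interval) does
ani = ArtistAnimation(fig, artist, interval=100, blit=True)
plt.tight_layout()
plt.show()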
@@ -0,0 +1,24 @@
+ """
+ CUDA
+ ====
+ The modules contained in this folder and their functions compute data on a graphics processing unit with CUDA technology and an installed CUDA toolkit, storing it in the GPU's VRAM.
+
+ Modules in the folder:
+ ----------------------
+ - activation_functions
+ - data_ops
+ - loss_functions
+ - metrics
+ - visualizations
+
+ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
+
+ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+
+ - Creator: Hasan Can Beydili
+ - YouTube: https://www.youtube.com/@HasanCanBeydili
+ - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
+ - Instagram: https://www.instagram.com/canbeydilj
+ - Contact: tchasancan@gmail.com
+
+ """
@@ -1,4 +1,44 @@
+ """
+
+ Activation Functions on CUDA
+ ============================
+ This module contains activation functions that run on the CUDA GPU.
+
+
+ Module functions:
+ -----------------
+ - 'sigmoid': Sigmoid,
+ - 'mod_circular': modular_circular_activation,
+ - 'tanh_circular': tanh_circular_activation,
+ - 'leaky_relu': leaky_relu,
+ - 'relu': Relu,
+ - 'gelu': gelu,
+ - 'tanh': tanh,
+ - 'sinakt': sinakt,
+ - 'p_squared': p_squared,
+ - 'sglu': lambda x: sglu(x, alpha=1.0),
+ - 'dlrelu': dlrelu,
+ - 'sin_plus': sin_plus,
+ - 'acos': lambda x: acos(x, alpha=1.0, beta=0.0),
+ - 'isra': isra,
+ - 'waveakt': waveakt,
+ - 'arctan': arctan,
+ - 'bent_identity': bent_identity,
+ - 'softsign': softsign,
+ - 'pwl': pwl,
+ - 'sine': sine,
+ - 'tanh_square': tanh_square,
+ - 'linear':,
+ - 'sine_square': sine_square,
+ - 'logarithmic': logarithmic,
+ - 'sine_offset': lambda x: sine_offset(x, 1.0),
+ - 'spiral': spiral_activation,
+ - 'circular': circular_activation
+ - Softmax()
+ """
+
  import cupy as cp
+ import numpy as np
  from scipy.special import expit, softmax
  import warnings

@@ -81,6 +121,9 @@ def sin_plus(x):
  def modular_circular_activation(x, period=2*cp.pi):
      return cp.mod(x, period) / period

+ def gelu(x):
+     return 0.5 * x * (1 + cp.tanh(cp.sqrt(2 / cp.pi) * (x + 0.044715 * cp.power(x, 3))))
+
  def tanh_circular_activation(x):
      return (cp.tanh(x) + 1) / 2

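The new gelu is the standard tanh approximation of GELU, 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x**3))). A quick NumPy check against the exact erf-based definition, as a sketch (the CuPy version above is identical apart from the array module):

import numpy as np
from scipy.special import erf

def gelu_tanh(x):
    # Mirrors the CuPy implementation added in this hunk
    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))

def gelu_exact(x):
    # Exact GELU: x * Phi(x), with Phi the standard normal CDF
    return 0.5 * x * (1 + erf(x / np.sqrt(2)))

x = np.linspace(-4, 4, 101)
print(np.max(np.abs(gelu_tanh(x) - gelu_exact(x))))  # on the order of 1e-3 or below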
@@ -201,13 +244,19 @@ def apply_activation(Input, activation_list):

      try:

-         valid_mask = cp.array([act in activation_functions for act in activation_list])
-         valid_activations = cp.array(activation_list)[valid_mask]
+         if isinstance(activation_list, str):
+             activation_list = [activation_list]

-         activation_outputs = cp.array([activation_functions[act](origin_input) for act in valid_activations])
+         activation_list = [str(act).lower() for act in activation_list]

-         return cp.sum(activation_outputs, axis=0)
-
+         valid_activations = [act for act in activation_list if act in activation_functions]
+
+         result = origin_input
+         for act in valid_activations:
+             result = activation_functions[act](result)
+
+         return result
+
      except Exception as e:
          warnings.warn(f"Error in activation processing: {str(e)}", RuntimeWarning)
          return Input
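
Note the semantic change in apply_activation: in 5.1 every listed activation was applied to the original input and the per-activation outputs were summed, while in 5.5 the activations are composed left to right, each feeding the next. A small NumPy illustration of the difference (the two-entry registry is a stand-in for the module's real activation_functions dict):

import numpy as np

activation_functions = {'relu': lambda x: np.maximum(0, x),  # illustrative registry
                        'tanh': np.tanh}

x = np.array([-2.0, -0.5, 1.0, 3.0])

# 5.1-style (sketch): each activation sees the ORIGINAL input; outputs are summed.
summed = sum(activation_functions[act](x) for act in ['relu', 'tanh'])

# 5.5-style (sketch): sequential composition, i.e. tanh(relu(x)).
result = x
for act in ['relu', 'tanh']:
    result = activation_functions[act](result)

print(summed)  # ~[-0.96 -0.46  1.76  4.00]  -- relu(x) + tanh(x)
print(result)  # ~[ 0.    0.    0.76  1.00]  -- tanh applied to relu(x)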
@@ -21,7 +21,7 @@ Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJe

  PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf

- - Author: Hasan Can Beydili
+ - Creator: Hasan Can Beydili
  - YouTube: https://www.youtube.com/@HasanCanBeydili
  - Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
  - Instagram: https://www.instagram.com/canbeydilj
@@ -47,7 +47,7 @@ def encode_one_hot(y_train, y_test=None, summary=False):
      tuple: One-hot encoded y_train and (if given: y_test).
      """

-     from .memory_operations import optimize_labels, transfer_to_cpu
+     from ..memory_ops import optimize_labels, transfer_to_cpu

      y_train = transfer_to_cpu(y_train,dtype=y_train.dtype)
      y_test = transfer_to_cpu(y_test,dtype=y_test.dtype)
@@ -113,7 +113,7 @@ def split(X, y, test_size, random_state=42, dtype=cp.float32, shuffle_in_cpu=Fal
      Returns:
      tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
      """
-     from .memory_operations import transfer_to_gpu, optimize_labels
+     from memory_ops import transfer_to_gpu, optimize_labels

      X = transfer_to_gpu(X, dtype=dtype)
      y = optimize_labels(y, one_hot_encoded=False, cuda=True)
@@ -172,8 +172,8 @@ def manuel_balancer(x_train, y_train, target_samples_per_class, dtype=cp.float32
      x_balanced -- Balanced input dataset (cupy array format)
      y_balanced -- Balanced class labels (one-hot encoded, cupy array format)
      """
-     from .ui import loading_bars, get_loading_bar_style
-     from .memory_operations import transfer_to_gpu
+     from ..ui import loading_bars, get_loading_bar_style
+     from ..memory_ops import transfer_to_gpu

      bar_format = loading_bars()[0]
      x_train = transfer_to_gpu(x_train, dtype=dtype)
@@ -261,8 +261,8 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=False):
      tuple: A tuple containing balanced input data and labels.
      """

-     from .ui import loading_bars, get_loading_bar_style
-     from .memory_operations import transfer_to_gpu
+     from ..ui import loading_bars, get_loading_bar_style
+     from ..memory_ops import transfer_to_gpu

      x_train = transfer_to_gpu(x_train, dtype=dtype)
      y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -331,8 +331,8 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, shuffle_in_cpu=Fa
      Returns:
      x_train_balanced, y_train_balanced (cupy array format)
      """
-     from .ui import loading_bars, get_loading_bar_style
-     from .memory_operations import transfer_to_gpu
+     from ..ui import loading_bars, get_loading_bar_style
+     from ..memory_ops import transfer_to_gpu

      x = transfer_to_gpu(x_train, dtype=dtype)
      y = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -484,20 +484,20 @@ def find_closest_factors(a):
          j = a // i
          return i, j

- def batcher(x_test, y_test, batch_size=1):
+ def batcher(x, y, batch_size=1):

      if batch_size == 1:
-         return x_test, y_test
+         return x, y

-     y_labels = cp.argmax(y_test, axis=1)
+     y_labels = cp.argmax(y, axis=1)

      unique_labels = cp.unique(y_labels)
      total_samples = sum(
          int(cp.sum(y_labels == class_label) * batch_size) for class_label in unique_labels
      )

-     sampled_x = cp.empty((total_samples, x_test.shape[1]), dtype=x_test.dtype)
-     sampled_y = cp.empty((total_samples, y_test.shape[1]), dtype=y_test.dtype)
+     sampled_x = cp.empty((total_samples, x.shape[1]), dtype=x.dtype)
+     sampled_y = cp.empty((total_samples, y.shape[1]), dtype=y.dtype)

      offset = 0
      for class_label in unique_labels:
@@ -507,8 +507,8 @@ def batcher(x_test, y_test, batch_size=1):

          sampled_indices = cp.random.choice(class_indices, num_samples, replace=False)

-         sampled_x[offset:offset + num_samples] = x_test[sampled_indices]
-         sampled_y[offset:offset + num_samples] = y_test[sampled_indices]
+         sampled_x[offset:offset + num_samples] = x[sampled_indices]
+         sampled_y[offset:offset + num_samples] = y[sampled_indices]

          offset += num_samples

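After the rename, batch_size in batcher reads as a per-class keep fraction: each class contributes int(class_count * batch_size) samples drawn without replacement, so class proportions survive the sub-sampling. A NumPy sketch of the same stratified logic (batcher itself operates on CuPy arrays; the shapes and the 70/30 split are illustrative):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 4))                 # 100 samples, 4 features
y = np.eye(2)[np.array([0] * 70 + [1] * 30)]  # one-hot labels, 70/30 class split

def batcher_np(x, y, batch_size=1):
    # Stratified sub-sampling mirroring the CuPy batcher above
    if batch_size == 1:
        return x, y
    y_labels = np.argmax(y, axis=1)
    keep_x, keep_y = [], []
    for class_label in np.unique(y_labels):
        class_indices = np.where(y_labels == class_label)[0]
        num_samples = int(len(class_indices) * batch_size)
        chosen = rng.choice(class_indices, num_samples, replace=False)
        keep_x.append(x[chosen])
        keep_y.append(y[chosen])
    return np.concatenate(keep_x), np.concatenate(keep_y)

bx, by = batcher_np(x, y, batch_size=0.2)
print(bx.shape, by.sum(axis=0))  # (20, 4) [14. 6.] -- 20% of each class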
@@ -1,7 +1,7 @@
  import cupy as cp

  def metrics(y_ts, test_preds, average='weighted'):
-     from .data_operations_cpu import decode_one_hot
+     from .data_ops import decode_one_hot
      y_test_d = cp.array(decode_one_hot(y_ts))
      y_pred = cp.array(test_preds)