ilovetools 0.2.21.tar.gz → 0.2.23.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. {ilovetools-0.2.21/ilovetools.egg-info → ilovetools-0.2.23}/PKG-INFO +2 -2
  2. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/__init__.py +2 -2
  3. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/activations.py +101 -148
  4. ilovetools-0.2.23/ilovetools/ml/normalization.py +847 -0
  5. ilovetools-0.2.23/ilovetools/ml/positional_encoding.py +587 -0
  6. ilovetools-0.2.23/ilovetools/ml/regularization.py +716 -0
  7. {ilovetools-0.2.21 → ilovetools-0.2.23/ilovetools.egg-info}/PKG-INFO +2 -2
  8. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools.egg-info/SOURCES.txt +5 -1
  9. {ilovetools-0.2.21 → ilovetools-0.2.23}/pyproject.toml +2 -2
  10. {ilovetools-0.2.21 → ilovetools-0.2.23}/setup.py +2 -2
  11. ilovetools-0.2.23/tests/test_normalization.py +409 -0
  12. ilovetools-0.2.23/tests/test_positional_encoding.py +492 -0
  13. ilovetools-0.2.23/tests/test_pypi_installation.py +136 -0
  14. ilovetools-0.2.23/tests/test_regularization.py +431 -0
  15. ilovetools-0.2.23/tests/verify_positional_encoding.py +162 -0
  16. ilovetools-0.2.21/ilovetools/ml/normalization.py +0 -523
  17. ilovetools-0.2.21/ilovetools/ml/regularization.py +0 -603
  18. ilovetools-0.2.21/tests/test_normalization.py +0 -439
  19. ilovetools-0.2.21/tests/test_regularization.py +0 -322
  20. {ilovetools-0.2.21 → ilovetools-0.2.23}/LICENSE +0 -0
  21. {ilovetools-0.2.21 → ilovetools-0.2.23}/MANIFEST.in +0 -0
  22. {ilovetools-0.2.21 → ilovetools-0.2.23}/README.md +0 -0
  23. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ai/__init__.py +0 -0
  24. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ai/embeddings.py +0 -0
  25. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ai/inference.py +0 -0
  26. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ai/llm_helpers.py +0 -0
  27. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/audio/__init__.py +0 -0
  28. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/automation/__init__.py +0 -0
  29. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/automation/file_organizer.py +0 -0
  30. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/conversion/__init__.py +0 -0
  31. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/conversion/config_converter.py +0 -0
  32. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
  33. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/data/__init__.py +0 -0
  34. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/data/feature_engineering.py +0 -0
  35. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/data/preprocessing.py +0 -0
  36. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/database/__init__.py +0 -0
  37. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/datetime/__init__.py +0 -0
  38. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/email/__init__.py +0 -0
  39. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/email/template_engine.py +0 -0
  40. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/files/__init__.py +0 -0
  41. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/image/__init__.py +0 -0
  42. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/__init__.py +0 -0
  43. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/anomaly_detection.py +0 -0
  44. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/attention.py +0 -0
  45. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/clustering.py +0 -0
  46. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/cnn.py +0 -0
  47. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/cross_validation.py +0 -0
  48. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/dimensionality.py +0 -0
  49. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/ensemble.py +0 -0
  50. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/feature_selection.py +0 -0
  51. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/gradient_descent.py +0 -0
  52. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/imbalanced.py +0 -0
  53. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/interpretation.py +0 -0
  54. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/loss_functions.py +0 -0
  55. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/metrics.py +0 -0
  56. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/neural_network.py +0 -0
  57. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/normalization_advanced.py +0 -0
  58. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/optimizers.py +0 -0
  59. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/pipeline.py +0 -0
  60. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/rnn.py +0 -0
  61. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/timeseries.py +0 -0
  62. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/ml/tuning.py +0 -0
  63. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/security/__init__.py +0 -0
  64. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/security/password_checker.py +0 -0
  65. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/text/__init__.py +0 -0
  66. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/utils/__init__.py +0 -0
  67. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/utils/cache_system.py +0 -0
  68. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/utils/logger.py +0 -0
  69. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/utils/rate_limiter.py +0 -0
  70. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/utils/retry.py +0 -0
  71. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/validation/__init__.py +0 -0
  72. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/validation/data_validator.py +0 -0
  73. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/web/__init__.py +0 -0
  74. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/web/scraper.py +0 -0
  75. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools/web/url_shortener.py +0 -0
  76. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools.egg-info/dependency_links.txt +0 -0
  77. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools.egg-info/requires.txt +0 -0
  78. {ilovetools-0.2.21 → ilovetools-0.2.23}/ilovetools.egg-info/top_level.txt +0 -0
  79. {ilovetools-0.2.21 → ilovetools-0.2.23}/requirements.txt +0 -0
  80. {ilovetools-0.2.21 → ilovetools-0.2.23}/setup.cfg +0 -0
  81. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/__init__.py +0 -0
  82. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_activations.py +0 -0
  83. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_attention.py +0 -0
  84. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_cnn.py +0 -0
  85. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_gradient_descent.py +0 -0
  86. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_loss_functions.py +0 -0
  87. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_neural_network.py +0 -0
  88. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_normalization_advanced.py +0 -0
  89. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_optimizers.py +0 -0
  90. {ilovetools-0.2.21 → ilovetools-0.2.23}/tests/test_rnn.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.21
+ Version: 0.2.23
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,batch-normalization,layer-normalization,normalization-techniques,deep-learning
+ Keywords: utilities,tools,ai,ml,data-processing,automation,transformers,positional-encoding,attention-mechanism,multi-head-attention,rope,alibi,deep-learning,nlp
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -2,8 +2,8 @@
  ilovetools - A comprehensive Python utility library
  """

- __version__ = "0.2.20"
- # release marker: 0.2.20
+ __version__ = "0.2.21"
+ # release marker: 0.2.21
  __author__ = "Ali Mehdi"
  __email__ = "ali.mehdi.dev579@gmail.com"

@@ -36,6 +36,23 @@ __all__ = [
  # Utilities
  'apply_activation',
  'get_activation_function',
+
+ # Convenient aliases (without _activation suffix)
+ 'sigmoid',
+ 'tanh',
+ 'relu',
+ 'leaky_relu',
+ 'elu',
+ 'selu',
+ 'gelu',
+ 'swish',
+ 'mish',
+ 'softplus',
+ 'softsign',
+ 'hard_sigmoid',
+ 'hard_tanh',
+ 'softmax',
+ 'log_softmax',
  ]
@@ -250,17 +267,15 @@ def gelu_activation(x: np.ndarray) -> np.ndarray:
  Gaussian Error Linear Unit (GELU) activation.

  GELU(x) = x × Φ(x)
-
- where Φ(x) is the cumulative distribution function of standard normal
+ where Φ(x) is the cumulative distribution function of the standard normal distribution

  Approximation: GELU(x) ≈ 0.5x(1 + tanh[√(2/π)(x + 0.044715x³)])

  Properties:
- - Smooth
- - Probabilistic interpretation
- - State-of-the-art performance
- - Used in BERT, GPT models
- - Non-monotonic
+ - Smooth, non-monotonic
+ - Used in BERT, GPT
+ - Better than ReLU for Transformers
+ - Stochastic regularizer

  Args:
  x: Input array
@@ -275,7 +290,7 @@ def gelu_activation(x: np.ndarray) -> np.ndarray:
  >>> x = np.array([-2, -1, 0, 1, 2])
  >>> output = gelu_activation(x)
  >>> print(output)
- [-0.0454 -0.1588 0.0000 0.8412 1.9545]
+ [-0.0454 -0.1588 0.0000 0.8412 1.9546]
  """
  return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))
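The corrected doctest value above (1.9546 rather than 1.9545) comes from the tanh approximation, not the exact definition. A minimal standalone check, using only NumPy and math.erf rather than the package itself, compares the approximation against the exact form GELU(x) = x · Φ(x):

    import math
    import numpy as np

    def gelu_tanh(x):
        # Tanh approximation, as implemented in activations.py
        return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))

    def gelu_exact(x):
        # Exact form: x * Phi(x), with Phi the standard normal CDF via erf
        return 0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))

    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    print(np.round(gelu_tanh(x), 4))   # [-0.0454 -0.1588  0.      0.8412  1.9546]
    print(np.round(gelu_exact(x), 4))  # [-0.0455 -0.1587  0.      0.8413  1.9545]

The two forms agree to about three decimal places over this range; the new doctest value matches the approximation the function actually computes.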
 
@@ -285,13 +300,12 @@ def swish_activation(
  beta: float = 1.0
  ) -> np.ndarray:
  """
- Swish activation function (also called SiLU).
+ Swish activation function (also known as SiLU).

  Swish(x) = x × σ(βx)

  Properties:
- - Smooth
- - Non-monotonic
+ - Smooth, non-monotonic
  - Self-gated
  - Better than ReLU in deep networks
  - Used in EfficientNet
@@ -320,14 +334,12 @@ def mish_activation(x: np.ndarray) -> np.ndarray:
  Mish activation function.

  Mish(x) = x × tanh(softplus(x))
- = x × tanh(ln(1 + e^x))

  Properties:
- - Smooth
- - Non-monotonic
+ - Smooth, non-monotonic
  - Self-regularizing
- - Better generalization than ReLU
- - Good for computer vision
+ - Better than ReLU and Swish in some cases
+ - Unbounded above, bounded below

  Args:
  x: Input array
@@ -344,20 +356,20 @@ def mish_activation(x: np.ndarray) -> np.ndarray:
  >>> print(output)
  [-0.2525 -0.3034 0.0000 0.8651 1.9440]
  """
- return x * np.tanh(np.log(1 + np.exp(np.clip(x, -20, 20))))
+ return x * np.tanh(np.log(1 + np.exp(x)))


  def softplus_activation(x: np.ndarray) -> np.ndarray:
  """
  Softplus activation function.

- Softplus(x) = ln(1 + e^x)
+ Softplus(x) = log(1 + e^x)

  Properties:
  - Smooth approximation of ReLU
  - Always positive
  - Differentiable everywhere
- - Used in probabilistic models
+ - Can be used as smooth ReLU

  Args:
  x: Input array
@@ -374,7 +386,7 @@ def softplus_activation(x: np.ndarray) -> np.ndarray:
  >>> print(output)
  [0.1269 0.3133 0.6931 1.3133 2.1269]
  """
- return np.log(1 + np.exp(np.clip(x, -20, 20)))
+ return np.log(1 + np.exp(np.clip(x, -500, 500)))


  def softsign_activation(x: np.ndarray) -> np.ndarray:
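Two numerical-stability choices change in the hunks above: mish_activation drops the clip inside np.exp entirely, and softplus_activation widens its clip from ±20 to ±500. Since np.exp overflows float64 for arguments above roughly 709, a common alternative is np.logaddexp, which evaluates log(1 + e^x) without ever forming e^x. A hedged sketch in that style, not the package's implementation:

    import numpy as np

    def softplus_stable(x):
        # log(1 + e^x) computed as log(e^0 + e^x), safe for large |x|
        return np.logaddexp(0.0, x)

    def mish_stable(x):
        # Mish(x) = x * tanh(softplus(x))
        return x * np.tanh(np.logaddexp(0.0, x))

    x = np.array([-50.0, 0.0, 50.0, 1000.0])
    print(softplus_stable(x))  # [~2e-22, 0.6931, 50.0, 1000.0], no overflow warnings
    print(mish_stable(x))      # large positive inputs pass through essentially unchanged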
@@ -388,7 +400,8 @@ def softsign_activation(x: np.ndarray) -> np.ndarray:
  Properties:
  - Similar to tanh but polynomial
  - Faster than tanh
- - Smoother saturation
+ - Smoother than ReLU
+ - Bounded output

  Args:
  x: Input array
@@ -410,14 +423,17 @@ def softsign_activation(x: np.ndarray) -> np.ndarray:

  def hard_sigmoid_activation(x: np.ndarray) -> np.ndarray:
  """
- Hard Sigmoid activation (piecewise linear approximation).
+ Hard sigmoid activation function.
+
+ HardSigmoid(x) = clip((x + 1) / 2, 0, 1)

- HardSigmoid(x) = max(0, min(1, 0.2x + 0.5))
+ Output range: [0, 1]

  Properties:
+ - Piecewise linear approximation of sigmoid
  - Faster than sigmoid
- - Piecewise linear
- - Good for mobile/embedded
+ - Used in mobile/embedded systems
+ - Good for quantization

  Args:
  x: Input array
@@ -429,24 +445,27 @@ def hard_sigmoid_activation(x: np.ndarray) -> np.ndarray:
  >>> import numpy as np
  >>> from ilovetools.ml import hard_sigmoid_activation

- >>> x = np.array([-3, -1, 0, 1, 3])
+ >>> x = np.array([-2, -1, 0, 1, 2])
  >>> output = hard_sigmoid_activation(x)
  >>> print(output)
- [0.0 0.3 0.5 0.7 1.0]
+ [0.0000 0.0000 0.5000 1.0000 1.0000]
  """
- return np.clip(0.2 * x + 0.5, 0, 1)
+ return np.clip((x + 1) / 2, 0, 1)


  def hard_tanh_activation(x: np.ndarray) -> np.ndarray:
  """
- Hard Tanh activation (piecewise linear approximation).
+ Hard tanh activation function.
+
+ HardTanh(x) = clip(x, -1, 1)

- HardTanh(x) = max(-1, min(1, x))
+ Output range: [-1, 1]

  Properties:
+ - Piecewise linear approximation of tanh
  - Faster than tanh
- - Piecewise linear
- - Bounded output
+ - Used in mobile/embedded systems
+ - Good for quantization

  Args:
  x: Input array
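Note that the hard_sigmoid_activation change above is behavioural, not just cosmetic: the removed implementation clip(0.2x + 0.5, 0, 1) saturates at x = ±2.5, while the added clip((x + 1) / 2, 0, 1) saturates at x = ±1, which is why the doctest inputs and outputs change. A quick side-by-side built only from the two formulas shown in this hunk:

    import numpy as np

    def hard_sigmoid_removed(x):
        return np.clip(0.2 * x + 0.5, 0, 1)   # '-' side of the hunk

    def hard_sigmoid_added(x):
        return np.clip((x + 1) / 2, 0, 1)     # '+' side of the hunk

    x = np.array([-3.0, -1.0, 0.0, 1.0, 3.0])
    print(hard_sigmoid_removed(x))  # [0.  0.3 0.5 0.7 1. ]
    print(hard_sigmoid_added(x))    # [0.  0.  0.5 1.  1. ]

Code that relied on the old slope of 0.2 will see different outputs for |x| < 2.5 after upgrading. hard_tanh_activation is unaffected in this sense: clip(x, -1, 1) is mathematically identical to the old max(-1, min(1, x)).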
@@ -473,10 +492,10 @@ def softmax_activation(
  """
  Softmax activation function.

- Softmax(x_i) = e^(x_i) / Σ(e^(x_j))
+ Softmax(x_i) = e^(x_i) / Σ e^(x_j)

  Properties:
- - Converts to probability distribution
+ - Converts logits to probabilities
  - Output sums to 1
  - Used for multi-class classification
  - Differentiable
@@ -492,11 +511,11 @@ def softmax_activation(
  >>> import numpy as np
  >>> from ilovetools.ml import softmax_activation

- >>> x = np.array([1.0, 2.0, 3.0])
+ >>> x = np.array([1, 2, 3, 4, 5])
  >>> output = softmax_activation(x)
  >>> print(output)
- [0.0900 0.2447 0.6652]
- >>> print(output.sum())
+ [0.0117 0.0317 0.0861 0.2341 0.6364]
+ >>> print(np.sum(output)) # Should be 1.0
  1.0
  """
  # Subtract max for numerical stability
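The "Subtract max for numerical stability" comment refers to the standard log-sum-exp shift: softmax is invariant to adding a constant to every logit, so subtracting the maximum keeps every exponent at or below zero and avoids overflow. A minimal sketch of the idea in plain NumPy (not the package's code, though it follows the same pattern):

    import numpy as np

    def softmax(x, axis=-1):
        # softmax(x) == softmax(x - c) for any constant c, so shift by the max
        x_shifted = x - np.max(x, axis=axis, keepdims=True)
        e = np.exp(x_shifted)                 # every exponent is <= 0, no overflow
        return e / np.sum(e, axis=axis, keepdims=True)

    logits = np.array([1000.0, 1001.0, 1002.0])
    print(softmax(logits))                        # [0.09003057 0.24472847 0.66524096]
    print(np.exp(logits) / np.exp(logits).sum())  # naive version: [nan nan nan] from inf/inf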
@@ -510,15 +529,15 @@ def log_softmax_activation(
  axis: int = -1
  ) -> np.ndarray:
  """
- Log-Softmax activation function.
+ Log-softmax activation function.

- LogSoftmax(x_i) = log(e^(x_i) / Σ(e^(x_j)))
- = x_i - log(Σ(e^(x_j)))
+ LogSoftmax(x_i) = log(e^(x_i) / Σ e^(x_j))
+ = x_i - log(Σ e^(x_j))

  Properties:
- - Numerically stable
- - Used with NLL loss
- - Avoids underflow
+ - Numerically stable version of log(softmax(x))
+ - Used with negative log-likelihood loss
+ - Better numerical properties than log(softmax(x))

  Args:
  x: Input array
@@ -531,10 +550,10 @@ def log_softmax_activation(
  >>> import numpy as np
  >>> from ilovetools.ml import log_softmax_activation

- >>> x = np.array([1.0, 2.0, 3.0])
+ >>> x = np.array([1, 2, 3, 4, 5])
  >>> output = log_softmax_activation(x)
  >>> print(output)
- [-2.4076 -1.4076 -0.4076]
+ [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
  """
  x_shifted = x - np.max(x, axis=axis, keepdims=True)
  return x_shifted - np.log(np.sum(np.exp(x_shifted), axis=axis, keepdims=True))
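The rewritten identity in the docstring, LogSoftmax(x_i) = x_i - log(Σ e^(x_j)), is what the two-line implementation computes, with the max subtracted first for the same stability reason as softmax. A short standalone check that the new doctest values are self-consistent:

    import numpy as np

    def log_softmax(x, axis=-1):
        # x_i - logsumexp(x), computed with the max shift for stability
        x_shifted = x - np.max(x, axis=axis, keepdims=True)
        return x_shifted - np.log(np.sum(np.exp(x_shifted), axis=axis, keepdims=True))

    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    out = log_softmax(x)
    print(np.round(out, 4))   # [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
    print(np.exp(out).sum())  # ~1.0, i.e. exp(log_softmax(x)) is a valid probability vector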
@@ -549,22 +568,13 @@ def sigmoid_derivative(x: np.ndarray) -> np.ndarray:
  σ'(x) = σ(x) × (1 - σ(x))

  Args:
- x: Input array (or sigmoid output)
+ x: Input array (can be pre-activated or post-activated)

  Returns:
  Derivative
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import sigmoid_derivative
-
- >>> x = np.array([0.0])
- >>> deriv = sigmoid_derivative(x)
- >>> print(deriv)
- [0.25]
  """
- sig = sigmoid_activation(x)
- return sig * (1 - sig)
+ s = sigmoid_activation(x)
+ return s * (1 - s)


  def tanh_derivative(x: np.ndarray) -> np.ndarray:
@@ -574,22 +584,13 @@ def tanh_derivative(x: np.ndarray) -> np.ndarray:
  tanh'(x) = 1 - tanh²(x)

  Args:
- x: Input array (or tanh output)
+ x: Input array

  Returns:
  Derivative
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import tanh_derivative
-
- >>> x = np.array([0.0])
- >>> deriv = tanh_derivative(x)
- >>> print(deriv)
- [1.0]
  """
- tanh_x = np.tanh(x)
- return 1 - tanh_x ** 2
+ t = tanh_activation(x)
+ return 1 - t**2


  def relu_derivative(x: np.ndarray) -> np.ndarray:
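The sigmoid_derivative and tanh_derivative hunks mostly strip the inline doctests and rename locals; the formulas σ'(x) = σ(x)(1 - σ(x)) and tanh'(x) = 1 - tanh²(x) are unchanged. A finite-difference spot check of both identities, in plain NumPy and independent of the package:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    x = np.linspace(-3.0, 3.0, 7)
    h = 1e-6

    # Central finite differences versus the closed-form derivatives
    fd_sigmoid = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)
    fd_tanh = (np.tanh(x + h) - np.tanh(x - h)) / (2 * h)

    print(np.allclose(fd_sigmoid, sigmoid(x) * (1 - sigmoid(x))))  # True
    print(np.allclose(fd_tanh, 1 - np.tanh(x) ** 2))               # True

The switch from np.tanh(x) to tanh_activation(x) in the new body should not change the result, assuming tanh_activation wraps np.tanh as its name suggests.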
@@ -603,15 +604,6 @@ def relu_derivative(x: np.ndarray) -> np.ndarray:

  Returns:
  Derivative
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import relu_derivative
-
- >>> x = np.array([-1, 0, 1])
- >>> deriv = relu_derivative(x)
- >>> print(deriv)
- [0 0 1]
  """
  return np.where(x > 0, 1, 0)
@@ -631,15 +623,6 @@ def leaky_relu_derivative(

  Returns:
  Derivative
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import leaky_relu_derivative
-
- >>> x = np.array([-1, 0, 1])
- >>> deriv = leaky_relu_derivative(x, alpha=0.01)
- >>> print(deriv)
- [0.01 0.01 1.00]
  """
  return np.where(x > 0, 1, alpha)
@@ -659,15 +642,6 @@ def elu_derivative(

  Returns:
  Derivative
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import elu_derivative
-
- >>> x = np.array([-1, 0, 1])
- >>> deriv = elu_derivative(x, alpha=1.0)
- >>> print(deriv)
- [0.3679 1.0000 1.0000]
  """
  return np.where(x > 0, 1, alpha * np.exp(x))
@@ -687,41 +661,23 @@ def swish_derivative(

  Returns:
  Derivative
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import swish_derivative
-
- >>> x = np.array([0.0])
- >>> deriv = swish_derivative(x)
- >>> print(deriv)
- [0.5]
  """
  swish = swish_activation(x, beta)
- sig = sigmoid_activation(beta * x)
- return swish + sig * (1 - swish)
+ sigmoid = sigmoid_activation(beta * x)
+ return swish + sigmoid * (1 - swish)


  def softplus_derivative(x: np.ndarray) -> np.ndarray:
  """
  Derivative of Softplus activation.

- Softplus'(x) = σ(x) = 1 / (1 + e^(-x))
+ Softplus'(x) = σ(x)

  Args:
  x: Input array

  Returns:
- Derivative (sigmoid)
-
- Examples:
- >>> import numpy as np
- >>> from ilovetools.ml import softplus_derivative
-
- >>> x = np.array([0.0])
- >>> deriv = softplus_derivative(x)
- >>> print(deriv)
- [0.5]
+ Derivative
  """
  return sigmoid_activation(x)
@@ -739,7 +695,7 @@ def apply_activation(
  Args:
  x: Input array
  activation: Name of activation function
- **kwargs: Additional parameters for activation
+ **kwargs: Additional arguments for activation function

  Returns:
  Activated output
@@ -748,36 +704,13 @@ def apply_activation(
  >>> import numpy as np
  >>> from ilovetools.ml import apply_activation

- >>> x = np.array([-1, 0, 1])
+ >>> x = np.array([-2, -1, 0, 1, 2])
  >>> output = apply_activation(x, 'relu')
  >>> print(output)
- [0 0 1]
+ [0 0 0 1 2]
  """
- activation_map = {
- 'sigmoid': sigmoid_activation,
- 'tanh': tanh_activation,
- 'relu': relu_activation,
- 'leaky_relu': leaky_relu_activation,
- 'elu': elu_activation,
- 'selu': selu_activation,
- 'gelu': gelu_activation,
- 'swish': swish_activation,
- 'mish': mish_activation,
- 'softplus': softplus_activation,
- 'softsign': softsign_activation,
- 'hard_sigmoid': hard_sigmoid_activation,
- 'hard_tanh': hard_tanh_activation,
- 'softmax': softmax_activation,
- 'log_softmax': log_softmax_activation,
- 'linear': lambda x: x,
- 'none': lambda x: x,
- }
-
- activation_lower = activation.lower()
- if activation_lower not in activation_map:
- raise ValueError(f"Unknown activation: {activation}")
-
- return activation_map[activation_lower](x, **kwargs)
+ activation_func = get_activation_function(activation)
+ return activation_func(x, **kwargs)


  def get_activation_function(activation: str):
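The apply_activation refactor above removes the function's private copy of the name-to-function dictionary and delegates the lookup to get_activation_function, so the mapping now lives in one place. Call sites are unaffected; a hedged usage sketch based on the doctests in this diff (the alpha keyword for 'leaky_relu' is an assumption, mirroring the alpha parameter that leaky_relu_derivative takes):

    import numpy as np
    from ilovetools.ml import apply_activation, get_activation_function

    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])

    print(apply_activation(x, 'relu'))                   # [0. 0. 0. 1. 2.]
    print(apply_activation(x, 'leaky_relu', alpha=0.1))  # **kwargs are forwarded on

    relu_func = get_activation_function('relu')          # the same callable apply_activation now uses
    print(relu_func.__name__)                            # relu_activation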
@@ -793,8 +726,8 @@ def get_activation_function(activation: str):
  Examples:
  >>> from ilovetools.ml import get_activation_function

- >>> relu = get_activation_function('relu')
- >>> print(relu.__name__)
+ >>> relu_func = get_activation_function('relu')
+ >>> print(relu_func.__name__)
  relu_activation
  """
  activation_map = {
@@ -820,3 +753,23 @@
  raise ValueError(f"Unknown activation: {activation}")

  return activation_map[activation_lower]
+
+
+ # ==================== CONVENIENT ALIASES ====================
+
+ # Aliases without _activation suffix for convenience
+ sigmoid = sigmoid_activation
+ tanh = tanh_activation
+ relu = relu_activation
+ leaky_relu = leaky_relu_activation
+ elu = elu_activation
+ selu = selu_activation
+ gelu = gelu_activation
+ swish = swish_activation
+ mish = mish_activation
+ softplus = softplus_activation
+ softsign = softsign_activation
+ hard_sigmoid = hard_sigmoid_activation
+ hard_tanh = hard_tanh_activation
+ softmax = softmax_activation
+ log_softmax = log_softmax_activation
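The module now ends with short aliases for every activation, matching the new __all__ entries added earlier in this diff. A small usage sketch; it assumes the short names are re-exported from ilovetools.ml in the same way the *_activation names used in the doctests are:

    import numpy as np
    from ilovetools.ml import relu, gelu, softmax  # aliases introduced in this release

    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])

    print(relu(x))               # identical to relu_activation(x)
    print(np.round(gelu(x), 4))  # [-0.0454 -0.1588  0.      0.8412  1.9546]
    print(softmax(x).sum())      # ~1.0, softmax_activation under a shorter name

Because these are plain assignments rather than wrappers, existing code that imports the *_activation names keeps working unchanged.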