lucid-dl 2.5.5__tar.gz → 2.8.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139) hide show
  1. {lucid_dl-2.5.5/lucid_dl.egg-info → lucid_dl-2.8.4}/PKG-INFO +11 -12
  2. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/README.md +10 -11
  3. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/__init__.py +16 -2
  4. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/bfunc.py +21 -0
  5. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/ufunc.py +40 -0
  6. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_tensor/tensor.py +33 -24
  7. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_tensor/tensor_ops.py +9 -1
  8. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_util/func.py +1 -1
  9. lucid_dl-2.8.4/lucid/data/__init__.py +2 -0
  10. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/data/_base.py +91 -3
  11. lucid_dl-2.8.4/lucid/data/_util.py +70 -0
  12. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/__init__.py +1 -0
  13. lucid_dl-2.8.4/lucid/models/imgclf/cspnet.py +425 -0
  14. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/mobile.py +30 -27
  15. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/__init__.py +2 -0
  16. lucid_dl-2.8.4/lucid/models/objdet/detr.py +948 -0
  17. lucid_dl-2.8.4/lucid/models/objdet/efficientdet.py +670 -0
  18. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/util.py +8 -2
  19. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/yolo/__init__.py +2 -0
  20. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/yolo/yolo_v2.py +12 -8
  21. lucid_dl-2.8.4/lucid/models/objdet/yolo/yolo_v3.py +426 -0
  22. lucid_dl-2.8.4/lucid/models/objdet/yolo/yolo_v4.py +640 -0
  23. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/__init__.py +1 -0
  24. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/__init__.py +23 -3
  25. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_loss.py +55 -1
  26. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_util.py +25 -1
  27. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/module.py +4 -6
  28. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/__init__.py +1 -0
  29. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/activation.py +9 -0
  30. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/loss.py +24 -1
  31. lucid_dl-2.8.4/lucid/nn/modules/rnn.py +529 -0
  32. lucid_dl-2.8.4/lucid/nn/util.py +60 -0
  33. lucid_dl-2.8.4/lucid/optim/__init__.py +8 -0
  34. lucid_dl-2.8.4/lucid/optim/_base.py +142 -0
  35. lucid_dl-2.8.4/lucid/optim/lr_scheduler/__init__.py +2 -0
  36. lucid_dl-2.8.4/lucid/optim/lr_scheduler/_base.py +85 -0
  37. lucid_dl-2.5.5/lucid/optim/lr_scheduler/_sched.py → lucid_dl-2.8.4/lucid/optim/lr_scheduler/_schedulers.py +36 -0
  38. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/port.py +13 -12
  39. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/random/__init__.py +18 -3
  40. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/types.py +2 -0
  41. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/weights/__init__.py +2 -2
  42. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/weights/__init__.pyi +45 -0
  43. {lucid_dl-2.5.5 → lucid_dl-2.8.4/lucid_dl.egg-info}/PKG-INFO +11 -12
  44. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/SOURCES.txt +15 -8
  45. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/setup.py +1 -1
  46. lucid_dl-2.5.5/lucid/data/__init__.py +0 -2
  47. lucid_dl-2.5.5/lucid/data/util.py +0 -1
  48. lucid_dl-2.5.5/lucid/optim/__init__.py +0 -8
  49. lucid_dl-2.5.5/lucid/optim/base.py +0 -57
  50. lucid_dl-2.5.5/lucid/optim/lr_scheduler/__init__.py +0 -2
  51. lucid_dl-2.5.5/lucid/optim/lr_scheduler/base.py +0 -63
  52. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/LICENSE +0 -0
  53. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/__init__.py +0 -0
  54. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_backend/__init__.py +0 -0
  55. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_backend/core.py +0 -0
  56. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_backend/metal.py +0 -0
  57. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/gfunc.py +0 -0
  58. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_tensor/__init__.py +0 -0
  59. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_util/__init__.py +0 -0
  60. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/__init__.py +0 -0
  61. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/_base.py +0 -0
  62. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/cifar.py +0 -0
  63. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/mnist.py +0 -0
  64. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/einops/__init__.py +0 -0
  65. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/einops/_func.py +0 -0
  66. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/error.py +0 -0
  67. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/linalg/__init__.py +0 -0
  68. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/linalg/_func.py +0 -0
  69. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/__init__.py +0 -0
  70. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/alex.py +0 -0
  71. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/coatnet.py +0 -0
  72. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/convnext.py +0 -0
  73. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/crossvit.py +0 -0
  74. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/cvt.py +0 -0
  75. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/dense.py +0 -0
  76. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/efficient.py +0 -0
  77. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/efficientformer.py +0 -0
  78. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/inception.py +0 -0
  79. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/inception_next.py +0 -0
  80. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/inception_res.py +0 -0
  81. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/lenet.py +0 -0
  82. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/maxvit.py +0 -0
  83. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/pvt.py +0 -0
  84. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/resnest.py +0 -0
  85. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/resnet.py +0 -0
  86. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/resnext.py +0 -0
  87. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/senet.py +0 -0
  88. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/sknet.py +0 -0
  89. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/swin.py +0 -0
  90. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/vgg.py +0 -0
  91. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/vit.py +0 -0
  92. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/xception.py +0 -0
  93. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/zfnet.py +0 -0
  94. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imggen/__init__.py +0 -0
  95. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imggen/ddpm.py +0 -0
  96. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imggen/vae.py +0 -0
  97. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/fast_rcnn.py +0 -0
  98. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/faster_rcnn.py +0 -0
  99. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/rcnn.py +0 -0
  100. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/yolo/yolo_v1.py +0 -0
  101. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/seq2seq/__init__.py +0 -0
  102. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/seq2seq/transformer.py +0 -0
  103. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/util.py +0 -0
  104. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_activation.py +0 -0
  105. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_attention.py +0 -0
  106. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_conv.py +0 -0
  107. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_drop.py +0 -0
  108. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_linear.py +0 -0
  109. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_norm.py +0 -0
  110. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_pool.py +0 -0
  111. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_spatial.py +0 -0
  112. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/fused.py +0 -0
  113. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/init/__init__.py +0 -0
  114. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/init/_dist.py +0 -0
  115. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/attention.py +0 -0
  116. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/conv.py +0 -0
  117. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/drop.py +0 -0
  118. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/einops.py +0 -0
  119. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/linear.py +0 -0
  120. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/norm.py +0 -0
  121. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/pool.py +0 -0
  122. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/sparse.py +0 -0
  123. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/transformer.py +0 -0
  124. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/vision.py +0 -0
  125. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/parameter.py +0 -0
  126. /lucid_dl-2.5.5/lucid/optim/_ada.py → /lucid_dl-2.8.4/lucid/optim/ada.py +0 -0
  127. /lucid_dl-2.5.5/lucid/optim/_adam.py → /lucid_dl-2.8.4/lucid/optim/adam.py +0 -0
  128. /lucid_dl-2.5.5/lucid/optim/_prop.py → /lucid_dl-2.8.4/lucid/optim/prop.py +0 -0
  129. /lucid_dl-2.5.5/lucid/optim/_sgd.py → /lucid_dl-2.8.4/lucid/optim/sgd.py +0 -0
  130. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/random/_func.py +0 -0
  131. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/transforms/__init__.py +0 -0
  132. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/transforms/_base.py +0 -0
  133. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/transforms/image.py +0 -0
  134. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/visual/__init__.py +0 -0
  135. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/visual/graph.py +0 -0
  136. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/dependency_links.txt +0 -0
  137. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/requires.txt +0 -0
  138. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/top_level.txt +0 -0
  139. {lucid_dl-2.5.5 → lucid_dl-2.8.4}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lucid-dl
3
- Version: 2.5.5
3
+ Version: 2.8.4
4
4
  Summary: Lumerico's Comprehensive Interface for Deep Learning
5
5
  Home-page: https://github.com/ChanLumerico/lucid
6
6
  Author: ChanLumerico
@@ -29,27 +29,26 @@ Dynamic: summary
29
29
  # Lucid² 💎
30
30
 
31
31
  ![PyPI - Version](https://img.shields.io/pypi/v/lucid-dl?color=red)
32
- ![PyPI - Downloads](https://img.shields.io/pypi/dm/lucid-dl)
33
- ![PyPI - Total Downloads](https://img.shields.io/badge/total%20downloads-30.2k-yellow)
34
- ![GitHub code size in bytes](https://img.shields.io/github/languages/code-size/ChanLumerico/lucid)
32
+ ![PyPI - Downloads](https://img.shields.io/pypi/dm/lucid-dl.svg)
33
+ ![PyPI - Total Downloads](https://img.shields.io/badge/total%20downloads-34.0k-yellow.svg)
34
+ ![GitHub code size in bytes](https://img.shields.io/github/languages/code-size/ChanLumerico/lucid.svg)
35
35
  ![Code Style](https://img.shields.io/badge/code%20style-black-000000.svg)
36
- ![Lines of Code](https://img.shields.io/endpoint?url=https%3A%2F%2Floc-counter.onrender.com%2F%3Frepo%3DChanLumerico%2Flucid%26branch%3Dmain%26ignored%3Ddocs%26stat%3DlinesOfCode&label=Lines%20of%20Code&color=purple&cacheSeconds=5)
36
+ ![Lines of Code](https://img.shields.io/badge/lines%20of%20code-26.5k-purple.svg)
37
37
 
38
38
  **Lucid** is a minimalist deep learning framework built entirely from scratch in Python. It offers a pedagogically rich environment to explore the foundations of modern deep learning systems, including autodiff, neural network modules, and GPU acceleration — all while staying lightweight, readable, and free of complex dependencies.
39
39
 
40
40
  Whether you're a student, educator, or an advanced researcher seeking to demystify deep learning internals, Lucid provides a transparent and highly introspectable API that faithfully replicates key behaviors of major frameworks like PyTorch, yet in a form simple enough to study line by line.
41
41
 
42
- [📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html)
43
-
42
+ [📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html) |
44
43
  [🤗 Lucid Huggingface](https://huggingface.co/ChanLumerico/lucid)
45
44
 
46
45
  ### 🔥 What's New
47
46
 
48
- - Now supports [**`🤗 Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
49
- - Implemented **YOLO-v2**: `lucid.models.YOLO_V2` with variants `yolo_v2`, `yolo_v2_tiny`
50
- - Implemented **YOLO-v1**: `lucid.models.YOLO_V1` with variants `yolo_v1`, `yolo_v1_tiny`
51
- - **DDPM** implemented: `lucid.models.DDPM`
52
- - Added `lucid.cumprod` and `lucid.cumsum`
47
+ - Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
48
+
49
+ - Added new neural module category `nn.rnn`, including:
50
+
51
+ `nn.RNNBase`, `nn.RNN`, `nn.LSTM`, `nn.GRU`, `nn.RNNCell`, `nn.LSTMCell`, `nn.GRUCell`
53
52
 
54
53
  ## 🔧 How to Install
55
54
 
@@ -1,27 +1,26 @@
1
1
  # Lucid² 💎
2
2
 
3
3
  ![PyPI - Version](https://img.shields.io/pypi/v/lucid-dl?color=red)
4
- ![PyPI - Downloads](https://img.shields.io/pypi/dm/lucid-dl)
5
- ![PyPI - Total Downloads](https://img.shields.io/badge/total%20downloads-30.2k-yellow)
6
- ![GitHub code size in bytes](https://img.shields.io/github/languages/code-size/ChanLumerico/lucid)
4
+ ![PyPI - Downloads](https://img.shields.io/pypi/dm/lucid-dl.svg)
5
+ ![PyPI - Total Downloads](https://img.shields.io/badge/total%20downloads-34.0k-yellow.svg)
6
+ ![GitHub code size in bytes](https://img.shields.io/github/languages/code-size/ChanLumerico/lucid.svg)
7
7
  ![Code Style](https://img.shields.io/badge/code%20style-black-000000.svg)
8
- ![Lines of Code](https://img.shields.io/endpoint?url=https%3A%2F%2Floc-counter.onrender.com%2F%3Frepo%3DChanLumerico%2Flucid%26branch%3Dmain%26ignored%3Ddocs%26stat%3DlinesOfCode&label=Lines%20of%20Code&color=purple&cacheSeconds=5)
8
+ ![Lines of Code](https://img.shields.io/badge/lines%20of%20code-26.5k-purple.svg)
9
9
 
10
10
  **Lucid** is a minimalist deep learning framework built entirely from scratch in Python. It offers a pedagogically rich environment to explore the foundations of modern deep learning systems, including autodiff, neural network modules, and GPU acceleration — all while staying lightweight, readable, and free of complex dependencies.
11
11
 
12
12
  Whether you're a student, educator, or an advanced researcher seeking to demystify deep learning internals, Lucid provides a transparent and highly introspectable API that faithfully replicates key behaviors of major frameworks like PyTorch, yet in a form simple enough to study line by line.
13
13
 
14
- [📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html)
15
-
14
+ [📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html) |
16
15
  [🤗 Lucid Huggingface](https://huggingface.co/ChanLumerico/lucid)
17
16
 
18
17
  ### 🔥 What's New
19
18
 
20
- - Now supports [**`🤗 Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
21
- - Implemented **YOLO-v2**: `lucid.models.YOLO_V2` with variants `yolo_v2`, `yolo_v2_tiny`
22
- - Implemented **YOLO-v1**: `lucid.models.YOLO_V1` with variants `yolo_v1`, `yolo_v1_tiny`
23
- - **DDPM** implemented: `lucid.models.DDPM`
24
- - Added `lucid.cumprod` and `lucid.cumsum`
19
+ - Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
20
+
21
+ - Added new neural module category `nn.rnn`, including:
22
+
23
+ `nn.RNNBase`, `nn.RNN`, `nn.LSTM`, `nn.GRU`, `nn.RNNCell`, `nn.LSTMCell`, `nn.GRUCell`
25
24
 
26
25
  ## 🔧 How to Install
27
26
 
@@ -42,8 +42,8 @@ def multiply(a: Tensor, b: Tensor, /) -> Tensor:
42
42
  return bfunc.multiply()(a, b)
43
43
 
44
44
 
45
- def div(a: Tensor, b: Tensor, /) -> Tensor:
46
- return bfunc.truediv()(a, b)
45
+ def div(a: Tensor, b: Tensor, /, floor: bool = False) -> Tensor:
46
+ return bfunc.truediv()(a, b) if not floor else bfunc.floordiv()(a, b)
47
47
 
48
48
 
49
49
  def _equal(a: Tensor, b: Tensor, /) -> Tensor:
@@ -129,6 +129,8 @@ _radd: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: add(a, b)
129
129
  _rsub: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: sub(b, a)
130
130
  _rmul: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: multiply(a, b)
131
131
  _rtruediv: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: div(b, a)
132
+ _floordiv: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: div(a, b, floor=True)
133
+ _rfloordiv: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: div(b, a, floor=True)
132
134
  _rbitwise_and: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: _bitwise_and(b, a)
133
135
  _rbitwise_or: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: _bitwise_or(b, a)
134
136
 
@@ -137,10 +139,18 @@ def _pow(a: Tensor, /, exp: _Scalar) -> Tensor:
137
139
  return ufunc._pow(exp)(a)
138
140
 
139
141
 
142
+ def _rpow(a: Tensor, /, base: _Scalar) -> Tensor:
143
+ return ufunc._rpow(base)(a)
144
+
145
+
140
146
  def _neg(a: Tensor, /) -> Tensor:
141
147
  return ufunc._neg()(a)
142
148
 
143
149
 
150
+ def _invert(a: Tensor, /) -> Tensor:
151
+ return ufunc._invert()(a)
152
+
153
+
144
154
  def exp(a: Tensor, /) -> Tensor:
145
155
  return ufunc.exp()(a)
146
156
 
@@ -551,6 +561,8 @@ Tensor.__mul__ = multiply
551
561
  Tensor.__rmul__ = _rmul
552
562
  Tensor.__truediv__ = div
553
563
  Tensor.__rtruediv__ = _rtruediv
564
+ Tensor.__floordiv__ = _floordiv
565
+ Tensor.__rfloordiv__ = _rfloordiv
554
566
  Tensor.__matmul__ = matmul
555
567
 
556
568
  Tensor.__eq__ = _equal
@@ -561,7 +573,9 @@ Tensor.__lt__ = _less
561
573
  Tensor.__le__ = _less_or_equal
562
574
 
563
575
  Tensor.__pow__ = _pow
576
+ Tensor.__rpow__ = _rpow
564
577
  Tensor.__neg__ = _neg
578
+ Tensor.__invert__ = _invert
565
579
 
566
580
  Tensor.__and__ = _bitwise_and
567
581
  Tensor.__rand__ = _rbitwise_and
@@ -108,6 +108,27 @@ class truediv(operation):
108
108
  return _broadcast_flops(a, b)
109
109
 
110
110
 
111
+ class floordiv(operation):
112
+ def __init__(self) -> None:
113
+ super().__init__()
114
+
115
+ @binary_func_op(has_gradient=False)
116
+ def cpu(self, a: Tensor, b: Tensor) -> _FuncOpReturnType:
117
+ self.result = Tensor(a.data // b.data).astype(lucid.Int)
118
+ return self.result, partial(self.__grad__, lib_=np)
119
+
120
+ @binary_func_op(has_gradient=False, device="gpu")
121
+ def gpu(self, a: Tensor, b: Tensor) -> _FuncOpReturnType:
122
+ self.result = Tensor(a.data // b.data).astype(lucid.Int)
123
+ return self.result, partial(self.__grad__, lib_=mx)
124
+
125
+ def __grad__(self, lib_: ModuleType) -> _GradFuncType:
126
+ return lib_.array(0.0), lib_.array(0.0)
127
+
128
+ def __flops__(self, a: Tensor, b: Tensor) -> int:
129
+ return _broadcast_flops(a, b)
130
+
131
+
111
132
  class _equal(operation):
112
133
  def __init__(self) -> None:
113
134
  super().__init__()
@@ -38,6 +38,28 @@ class _pow(operation):
38
38
  return 11 * a.size
39
39
 
40
40
 
41
+ class _rpow(operation):
42
+ def __init__(self, base: _Scalar) -> None:
43
+ super().__init__()
44
+ self.base = base
45
+
46
+ @unary_func_op()
47
+ def cpu(self, a: Tensor) -> _FuncOpReturnType:
48
+ self.result = Tensor(self.base**a.data)
49
+ return self.result, partial(self.__grad__, a=a)
50
+
51
+ @unary_func_op(device="gpu")
52
+ def gpu(self, a: Tensor) -> _FuncOpReturnType:
53
+ self.result = Tensor(self.base**a.data)
54
+ return self.result, partial(self.__grad__, a=a)
55
+
56
+ def __grad__(self, a: Tensor) -> _GradFuncType:
57
+ return (math.log(self.base) * self.base**a.data) * self.result.grad
58
+
59
+ def __flops__(self, a: Tensor) -> int:
60
+ return 11 * a.size
61
+
62
+
41
63
  class _neg(operation):
42
64
  def __init__(self) -> None:
43
65
  super().__init__()
@@ -59,6 +81,24 @@ class _neg(operation):
59
81
  return a.size
60
82
 
61
83
 
84
+ class _invert(operation):
85
+ def __init__(self) -> None:
86
+ super().__init__()
87
+
88
+ @unary_func_op(has_gradient=False)
89
+ def cpu(self, a: Tensor) -> _FuncOpReturnType:
90
+ self.result = Tensor(~a.data)
91
+ return self.result, partial(self.__grad__, lib_=np)
92
+
93
+ @unary_func_op(has_gradient=False, device="gpu")
94
+ def gpu(self, a: Tensor) -> _FuncOpReturnType:
95
+ self.result = Tensor(mx.bitwise_invert(a.data))
96
+ return self.result, partial(self.__grad__, lib_=mx)
97
+
98
+ def __grad__(self, lib_: ModuleType) -> _GradFuncType:
99
+ return lib_.array(0.0)
100
+
101
+
62
102
  class exp(operation):
63
103
  def __init__(self) -> None:
64
104
  super().__init__()
@@ -15,7 +15,7 @@ from lucid.types import (
15
15
  Numeric,
16
16
  )
17
17
 
18
- from lucid._tensor.tensor_ops import _TensorOps
18
+ from lucid._tensor.tensor_ops import _TensorBase
19
19
  from lucid._backend.metal import mx, parse_mlx_indexing, check_metal_availability
20
20
 
21
21
 
@@ -27,7 +27,7 @@ _dtype_map = {int: types.Int64, float: types.Float64, complex: types.Complex64}
27
27
  def _noop() -> None: ...
28
28
 
29
29
 
30
- class Tensor(_TensorOps):
30
+ class Tensor(_TensorBase):
31
31
  def __init__(
32
32
  self,
33
33
  data: _ArrayOrScalar | _MLXArray,
@@ -397,28 +397,33 @@ class Tensor(_TensorOps):
397
397
  return hash(id(self))
398
398
 
399
399
  def __deepcopy__(self, *args: Any) -> Self:
400
+ cls = self.__class__
400
401
  copied_data = Tensor.copy_data(self.data)
401
402
 
402
- new_tensor = Tensor(
403
- copied_data,
404
- requires_grad=self.requires_grad,
405
- keep_grad=self.keep_grad,
406
- dtype=self.dtype,
407
- device=self.device,
408
- )
403
+ if cls is Tensor:
404
+ new = Tensor(
405
+ copied_data, self.requires_grad, self.keep_grad, self.dtype, self.device
406
+ )
407
+ else:
408
+ base = Tensor(copied_data, dtype=self.dtype, device=self.device)
409
+ new = cls(base)
409
410
 
410
- if self.grad is not None:
411
- new_tensor.grad = Tensor.copy_grad(self.grad)
411
+ if self.grad is not None and (
412
+ self.keep_grad or getattr(new, "keep_grad", False)
413
+ ):
414
+ new.grad = Tensor.copy_grad(self.grad)
415
+ else:
416
+ new.grad = None
412
417
 
413
- new_tensor._op = self._op
414
- new_tensor._backward_op = self._backward_op
415
- new_tensor._prev = self._prev.copy()
416
- new_tensor._backward_hooks = self._backward_hooks.copy()
418
+ new._op = None
419
+ new._backward_op = _noop
420
+ new._prev = []
421
+ new._backward_hooks = []
417
422
 
418
- new_tensor._is_free = self._is_free
419
- new_tensor._is_bool_tensor = self._is_bool_tensor
423
+ new._is_free = self._is_free
424
+ new._is_bool_tensor = self._is_bool_tensor
420
425
 
421
- return new_tensor
426
+ return new
422
427
 
423
428
  def __bool__(self) -> bool:
424
429
  if self.data.size != 1:
@@ -428,16 +433,20 @@ class Tensor(_TensorOps):
428
433
  )
429
434
  return bool(self.data.item())
430
435
 
431
- def any(self) -> bool:
436
+ def any(self, axis: int | None = None, keepdims: bool = False) -> bool | Self:
432
437
  if self.is_cpu():
433
- return bool(np.any(self.data))
438
+ result = np.any(self.data, axis=axis, keepdims=keepdims)
439
+ return bool(result) if axis is None else Tensor(result, device="cpu")
434
440
  else:
435
441
  mx.eval(self.data)
436
- return bool(mx.any(self.data).item())
442
+ result = mx.any(self.data, axis=axis, keepdims=keepdims)
443
+ return bool(result.item()) if axis is None else Tensor(result, device="gpu")
437
444
 
438
- def all(self) -> bool:
445
+ def all(self, axis=None, keepdims=False) -> bool | Self:
439
446
  if self.is_cpu():
440
- return bool(np.all(self.data))
447
+ result = np.all(self.data, axis=axis, keepdims=keepdims)
448
+ return bool(result) if axis is None else Tensor(result, device="cpu")
441
449
  else:
442
450
  mx.eval(self.data)
443
- return bool(mx.all(self.data).item())
451
+ result = mx.all(self.data, axis=axis, keepdims=keepdims)
452
+ return bool(result.item()) if axis is None else Tensor(result, device="gpu")
@@ -3,7 +3,7 @@ from typing import Self, Sequence
3
3
  from lucid.types import _Scalar, _ArrayOrScalar, _ShapeLike, _ArrayLikeInt
4
4
 
5
5
 
6
- class _TensorOps:
6
+ class _TensorBase:
7
7
  def __add__(self, other: Self | _ArrayOrScalar) -> Self: ...
8
8
 
9
9
  def __radd__(self, other: Self | _ArrayOrScalar) -> Self: ...
@@ -20,6 +20,10 @@ class _TensorOps:
20
20
 
21
21
  def __rtruediv__(self, other: Self | _ArrayOrScalar) -> Self: ...
22
22
 
23
+ def __floordiv__(self, other: Self | _ArrayOrScalar) -> Self: ...
24
+
25
+ def __rfloordiv__(self, other: Self | _ArrayOrScalar) -> Self: ...
26
+
23
27
  def __matmul__(self, other: Self | _ArrayOrScalar) -> Self: ...
24
28
 
25
29
  def __eq__(self, other: Self | _ArrayOrScalar) -> Self: ...
@@ -36,8 +40,12 @@ class _TensorOps:
36
40
 
37
41
  def __pow__(self, _: _Scalar) -> Self: ...
38
42
 
43
+ def __rpow__(self, other: Self | _ArrayOrScalar) -> Self: ...
44
+
39
45
  def __neg__(self) -> Self: ...
40
46
 
47
+ def __invert__(self) -> Self: ...
48
+
41
49
  def __and__(self, other: Self | _ArrayOrScalar) -> Self: ...
42
50
 
43
51
  def __rand__(self, other: Self | _ArrayOrScalar) -> Self: ...
@@ -919,7 +919,7 @@ class argmax(operation):
919
919
 
920
920
  @unary_func_op(has_gradient=False, device="gpu")
921
921
  def gpu(self, a: Tensor) -> _FuncOpReturnType:
922
- axis = self.aixs if self.axis is not None else 0
922
+ axis = self.axis if self.axis is not None else 0
923
923
  indices = mx.argmax(a.data, axis=axis)
924
924
  if self.keepdims:
925
925
  indices = mx.expand_dims(indices, axis)
@@ -0,0 +1,2 @@
1
+ from lucid.data._base import Dataset, Subset, TensorDataset, ConcatDataset, DataLoader
2
+ from lucid.data._util import *
@@ -1,15 +1,16 @@
1
1
  from abc import ABC, abstractmethod
2
- from typing import Callable, Self, Any
2
+ from typing import Callable, Iterator, Self, Any, override
3
3
  import random
4
4
  import math
5
5
 
6
6
  import lucid
7
7
  from lucid._tensor import Tensor
8
+ from lucid.types import _ArrayLike, _IndexLike, _DeviceType
8
9
 
9
10
 
10
11
  class Dataset(ABC):
11
12
  @abstractmethod
12
- def __getitem__(self, index: int) -> None:
13
+ def __getitem__(self, idx: _IndexLike) -> None:
13
14
  raise NotImplementedError("Subclasses must implement __getitem__.")
14
15
 
15
16
  @abstractmethod
@@ -19,6 +20,92 @@ class Dataset(ABC):
19
20
  def __add__(self, other: Self) -> Self:
20
21
  return ConcatDataset([self, other])
21
22
 
23
+ def __iter__(self) -> Iterator[Any]:
24
+ for i in range(len(self)):
25
+ yield self[i]
26
+
27
+ def __repr__(self) -> str:
28
+ return f"Dataset(n={len(self)})"
29
+
30
+
31
+ class Subset(Dataset):
32
+ def __init__(self, dataset: Dataset, indices: list[int]) -> None:
33
+ super().__init__()
34
+ self.dataset = dataset
35
+ self.indices = indices
36
+
37
+ def __getitem__(self, idx: _IndexLike) -> Any:
38
+ return self.dataset[self.indices[idx]]
39
+
40
+ def __len__(self) -> int:
41
+ return len(self.indices)
42
+
43
+ @override
44
+ def __iter__(self) -> Iterator[Any]:
45
+ for i in self.indices:
46
+ yield self.dataset[i]
47
+
48
+ def __getattr__(self, name: str) -> Any:
49
+ return getattr(self.dataset, name)
50
+
51
+ def __repr__(self) -> str:
52
+ return f"Subset(n={len(self)})"
53
+
54
+
55
+ class TensorDataset(Dataset):
56
+ def __init__(self, *tensors_or_arrays: Tensor | _ArrayLike) -> None:
57
+ super().__init__()
58
+ if len(tensors_or_arrays) == 0:
59
+ raise ValueError(
60
+ "TensorDataset requires at least one tensor/array-like object."
61
+ )
62
+ try:
63
+ self._tensors: tuple[Tensor, ...] = tuple(
64
+ lucid._check_is_tensor(t) for t in tensors_or_arrays
65
+ )
66
+ except Exception as e:
67
+ raise RuntimeError(
68
+ "Failed to convert array-like object(s) to tensor."
69
+ ) from e
70
+
71
+ n0 = len(self._tensors[0])
72
+ for i, t in enumerate(self._tensors):
73
+ if t.ndim == 0 or len(t) == 0:
74
+ raise RuntimeError(
75
+ "All tensors must be at least 1D. "
76
+ f"Tensor at index {i} has no length."
77
+ )
78
+ if len(t) != n0:
79
+ raise ValueError(
80
+ "All tensors must have the same length along dim 0: "
81
+ f"got {n0} and {len(t)} at index {i}."
82
+ )
83
+
84
+ def __len__(self) -> int:
85
+ return len(self._tensors[0])
86
+
87
+ @override
88
+ def __getitem__(self, idx: _IndexLike | Tensor) -> tuple[Tensor, ...]:
89
+ return tuple(t[idx] for t in self._tensors)
90
+
91
+ def to(self, device: _DeviceType) -> Self:
92
+ self._tensors = tuple(t.to(device) for t in self._tensors)
93
+ return self
94
+
95
+ @property
96
+ def tensors(self) -> tuple[Tensor, ...]:
97
+ return self._tensors
98
+
99
+ @override
100
+ def __iter__(self) -> Iterator[tuple[Tensor, ...]]:
101
+ return super().__iter__()
102
+
103
+ @override
104
+ def __repr__(self) -> str:
105
+ shapes = ", ".join(str(t.shape) for t in self._tensors)
106
+ devices = {t.device for t in self._tensors}
107
+ return f"TensorDataset(n={len(self)}, shapes=({shapes}), devices={devices})"
108
+
22
109
 
23
110
  class ConcatDataset(Dataset):
24
111
  def __init__(self, datasets: list[Dataset]) -> None:
@@ -38,7 +125,7 @@ class ConcatDataset(Dataset):
38
125
  def __len__(self) -> int:
39
126
  return self.cumulative_sizes[-1] if self.cumulative_sizes else 0
40
127
 
41
- def __getitem__(self, idx: int) -> Any:
128
+ def __getitem__(self, idx: _IndexLike) -> Any:
42
129
  if idx < 0:
43
130
  if -idx > len(self):
44
131
  raise IndexError("Index out of range.")
@@ -101,6 +188,7 @@ class DataLoader:
101
188
  if isinstance(batch[0], (tuple, list)):
102
189
  transposed = list(zip(*batch))
103
190
  return tuple(lucid.stack(tuple(x), axis=0) for x in transposed)
191
+
104
192
  elif isinstance(batch[0], Tensor):
105
193
  return lucid.stack(tuple(batch), axis=0)
106
194
  else:
@@ -0,0 +1,70 @@
1
+ from typing import Sequence
2
+ import random
3
+ import math
4
+
5
+ import lucid
6
+
7
+ from ._base import Dataset, Subset
8
+
9
+
10
+ __all__ = ["random_split"]
11
+
12
+
13
+ def _resolve_lengths_from_fractions(fractions: Sequence[float], n: int) -> list[int]:
14
+ if not fractions:
15
+ raise ValueError("fractions must be non-empty.")
16
+ if any(f < 0 for f in fractions):
17
+ raise ValueError("Fractional lengths must be non-negative.")
18
+
19
+ s = sum(fractions)
20
+ if not math.isclose(s, 1.0, rel_tol=1e-6, abs_tol=1e-6):
21
+ raise ValueError(f"When passing fractions, they must sum to 1.0 (got {s}).")
22
+
23
+ base = [int(math.floor(f * n)) for f in fractions]
24
+ remainder = n - sum(base)
25
+ for i in range(remainder):
26
+ base[i % len(base)] += 1
27
+
28
+ return base
29
+
30
+
31
+ def random_split(
32
+ dataset: Dataset, lengths: Sequence[int | float], seed: int | None = None
33
+ ) -> tuple[Subset, ...]:
34
+ n = len(dataset)
35
+ if not lengths:
36
+ raise ValueError("lengths must be non-empty.")
37
+
38
+ all_int = all(isinstance(l, int) for l in lengths)
39
+ all_float = all(isinstance(l, float) for l in lengths)
40
+
41
+ if not (all_int or all_float):
42
+ raise TypeError("lengths must be all integers or all floats.")
43
+
44
+ if all_float:
45
+ int_lengths = _resolve_lengths_from_fractions(lengths, n)
46
+ else:
47
+ int_lengths = list(lengths)
48
+ s = sum(int_lengths)
49
+ if s != n:
50
+ raise ValueError(
51
+ f"Sum of input lengths ({s}) does not equal dataset length ({n})."
52
+ )
53
+ if any(l < 0 for l in int_lengths):
54
+ raise ValueError("All split lengths must be non-negative.")
55
+
56
+ if seed is None:
57
+ seed = lucid.random.get_seed()
58
+ rng = random.Random(seed)
59
+
60
+ indices = list(range(n))
61
+ rng.shuffle(indices)
62
+
63
+ splits: list[Subset] = []
64
+ offset = 0
65
+ for length in int_lengths:
66
+ split_idx = indices[offset : offset + length]
67
+ splits.append(Subset(dataset, split_idx))
68
+ offset += length
69
+
70
+ return tuple(splits)
@@ -2,6 +2,7 @@ from .alex import *
2
2
  from .coatnet import *
3
3
  from .convnext import *
4
4
  from .crossvit import *
5
+ from .cspnet import *
5
6
  from .cvt import *
6
7
  from .dense import *
7
8
  from .efficient import *