lucid-dl 2.5.5__tar.gz → 2.8.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucid_dl-2.5.5/lucid_dl.egg-info → lucid_dl-2.8.4}/PKG-INFO +11 -12
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/README.md +10 -11
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/__init__.py +16 -2
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/bfunc.py +21 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/ufunc.py +40 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_tensor/tensor.py +33 -24
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_tensor/tensor_ops.py +9 -1
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_util/func.py +1 -1
- lucid_dl-2.8.4/lucid/data/__init__.py +2 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/data/_base.py +91 -3
- lucid_dl-2.8.4/lucid/data/_util.py +70 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/__init__.py +1 -0
- lucid_dl-2.8.4/lucid/models/imgclf/cspnet.py +425 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/mobile.py +30 -27
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/__init__.py +2 -0
- lucid_dl-2.8.4/lucid/models/objdet/detr.py +948 -0
- lucid_dl-2.8.4/lucid/models/objdet/efficientdet.py +670 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/util.py +8 -2
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/yolo/__init__.py +2 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/yolo/yolo_v2.py +12 -8
- lucid_dl-2.8.4/lucid/models/objdet/yolo/yolo_v3.py +426 -0
- lucid_dl-2.8.4/lucid/models/objdet/yolo/yolo_v4.py +640 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/__init__.py +1 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/__init__.py +23 -3
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_loss.py +55 -1
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_util.py +25 -1
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/module.py +4 -6
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/__init__.py +1 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/activation.py +9 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/loss.py +24 -1
- lucid_dl-2.8.4/lucid/nn/modules/rnn.py +529 -0
- lucid_dl-2.8.4/lucid/nn/util.py +60 -0
- lucid_dl-2.8.4/lucid/optim/__init__.py +8 -0
- lucid_dl-2.8.4/lucid/optim/_base.py +142 -0
- lucid_dl-2.8.4/lucid/optim/lr_scheduler/__init__.py +2 -0
- lucid_dl-2.8.4/lucid/optim/lr_scheduler/_base.py +85 -0
- lucid_dl-2.5.5/lucid/optim/lr_scheduler/_sched.py → lucid_dl-2.8.4/lucid/optim/lr_scheduler/_schedulers.py +36 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/port.py +13 -12
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/random/__init__.py +18 -3
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/types.py +2 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/weights/__init__.py +2 -2
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/weights/__init__.pyi +45 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4/lucid_dl.egg-info}/PKG-INFO +11 -12
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/SOURCES.txt +15 -8
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/setup.py +1 -1
- lucid_dl-2.5.5/lucid/data/__init__.py +0 -2
- lucid_dl-2.5.5/lucid/data/util.py +0 -1
- lucid_dl-2.5.5/lucid/optim/__init__.py +0 -8
- lucid_dl-2.5.5/lucid/optim/base.py +0 -57
- lucid_dl-2.5.5/lucid/optim/lr_scheduler/__init__.py +0 -2
- lucid_dl-2.5.5/lucid/optim/lr_scheduler/base.py +0 -63
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/LICENSE +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_backend/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_backend/core.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_backend/metal.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_func/gfunc.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_tensor/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/_util/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/_base.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/cifar.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/datasets/mnist.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/einops/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/einops/_func.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/error.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/linalg/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/linalg/_func.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/alex.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/coatnet.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/convnext.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/crossvit.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/cvt.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/dense.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/efficient.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/efficientformer.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/inception.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/inception_next.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/inception_res.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/lenet.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/maxvit.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/pvt.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/resnest.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/resnet.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/resnext.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/senet.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/sknet.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/swin.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/vgg.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/vit.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/xception.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imgclf/zfnet.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imggen/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imggen/ddpm.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/imggen/vae.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/fast_rcnn.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/faster_rcnn.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/rcnn.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/objdet/yolo/yolo_v1.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/seq2seq/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/seq2seq/transformer.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/models/util.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_activation.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_attention.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_conv.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_drop.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_linear.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_norm.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_pool.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/functional/_spatial.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/fused.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/init/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/init/_dist.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/attention.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/conv.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/drop.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/einops.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/linear.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/norm.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/pool.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/sparse.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/transformer.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/modules/vision.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/nn/parameter.py +0 -0
- /lucid_dl-2.5.5/lucid/optim/_ada.py → /lucid_dl-2.8.4/lucid/optim/ada.py +0 -0
- /lucid_dl-2.5.5/lucid/optim/_adam.py → /lucid_dl-2.8.4/lucid/optim/adam.py +0 -0
- /lucid_dl-2.5.5/lucid/optim/_prop.py → /lucid_dl-2.8.4/lucid/optim/prop.py +0 -0
- /lucid_dl-2.5.5/lucid/optim/_sgd.py → /lucid_dl-2.8.4/lucid/optim/sgd.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/random/_func.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/transforms/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/transforms/_base.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/transforms/image.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/visual/__init__.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid/visual/graph.py +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/dependency_links.txt +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/requires.txt +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/lucid_dl.egg-info/top_level.txt +0 -0
- {lucid_dl-2.5.5 → lucid_dl-2.8.4}/setup.cfg +0 -0
```diff
--- lucid_dl-2.5.5/lucid_dl.egg-info/PKG-INFO
+++ lucid_dl-2.8.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lucid-dl
-Version: 2.5.5
+Version: 2.8.4
 Summary: Lumerico's Comprehensive Interface for Deep Learning
 Home-page: https://github.com/ChanLumerico/lucid
 Author: ChanLumerico
```
```diff
@@ -29,27 +29,26 @@ Dynamic: summary
 # Lucid² 💎
 
 
-
-
+
+
+
 
-
 
 **Lucid** is a minimalist deep learning framework built entirely from scratch in Python. It offers a pedagogically rich environment to explore the foundations of modern deep learning systems, including autodiff, neural network modules, and GPU acceleration — all while staying lightweight, readable, and free of complex dependencies.
 
 Whether you're a student, educator, or an advanced researcher seeking to demystify deep learning internals, Lucid provides a transparent and highly introspectable API that faithfully replicates key behaviors of major frameworks like PyTorch, yet in a form simple enough to study line by line.
 
-[📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html)
-
+[📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html) |
 [🤗 Lucid Huggingface](https://huggingface.co/ChanLumerico/lucid)
 
 ### 🔥 What's New
 
-- Now supports [
-
--
-
-
+- Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
+
+- Added new neural module category `nn.rnn`, including:
+
+`nn.RNNBase`, `nn.RNN`, `nn.LSTM`, `nn.GRU`, `nn.RNNCell`, `nn.LSTMCell`, `nn.GRUCell`
 
 ## 🔧 How to Install
 
```
```diff
--- lucid_dl-2.5.5/README.md
+++ lucid_dl-2.8.4/README.md
@@ -1,27 +1,26 @@
 # Lucid² 💎
 
 
-
-
+
+
+
 
-
 
 **Lucid** is a minimalist deep learning framework built entirely from scratch in Python. It offers a pedagogically rich environment to explore the foundations of modern deep learning systems, including autodiff, neural network modules, and GPU acceleration — all while staying lightweight, readable, and free of complex dependencies.
 
 Whether you're a student, educator, or an advanced researcher seeking to demystify deep learning internals, Lucid provides a transparent and highly introspectable API that faithfully replicates key behaviors of major frameworks like PyTorch, yet in a form simple enough to study line by line.
 
-[📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html)
-
+[📑 Lucid Documentation](https://chanlumerico.github.io/lucid/build/html/index.html) |
 [🤗 Lucid Huggingface](https://huggingface.co/ChanLumerico/lucid)
 
 ### 🔥 What's New
 
-- Now supports [
-
--
-
-
+- Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
+
+- Added new neural module category `nn.rnn`, including:
+
+`nn.RNNBase`, `nn.RNN`, `nn.LSTM`, `nn.GRU`, `nn.RNNCell`, `nn.LSTMCell`, `nn.GRUCell`
 
 ## 🔧 How to Install
 
```
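The What's New entries above name the new `nn.rnn` module category, whose implementation lives in the new `lucid/nn/modules/rnn.py` (+529 lines, not shown here). The sketch below is therefore hypothetical: it assumes a PyTorch-like constructor and call convention, which the README says Lucid generally mirrors, and assumes `lucid.random.randn` exists.

```python
import lucid
import lucid.nn as nn

# Hypothetical usage sketch -- the LSTM constructor/call signatures are
# assumed to be PyTorch-like; they are not visible in this diff.
lstm = nn.LSTM(input_size=16, hidden_size=32)

x = lucid.random.randn(8, 4, 16)  # assumed (seq_len, batch, feature) layout
out, (h, c) = lstm(x)             # per-step outputs plus final hidden/cell state
```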
```diff
--- lucid_dl-2.5.5/lucid/_func/__init__.py
+++ lucid_dl-2.8.4/lucid/_func/__init__.py
@@ -42,8 +42,8 @@ def multiply(a: Tensor, b: Tensor, /) -> Tensor:
     return bfunc.multiply()(a, b)
 
 
-def div(a: Tensor, b: Tensor,
-    return bfunc.truediv()(a, b)
+def div(a: Tensor, b: Tensor, /, floor: bool = False) -> Tensor:
+    return bfunc.truediv()(a, b) if not floor else bfunc.floordiv()(a, b)
 
 
 def _equal(a: Tensor, b: Tensor, /) -> Tensor:
```
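The new `floor` flag routes `div` to the `floordiv` operation (defined in `bfunc.py` below) instead of `truediv`; this is what backs the `//` operator wired onto `Tensor` later in this file. A minimal sketch of the intended semantics, assuming the ordinary `Tensor(data)` constructor:

```python
from lucid import Tensor

a, b = Tensor([7.0, -7.0]), Tensor([2.0, 2.0])

q = a / b   # truediv path: [3.5, -3.5]
r = a // b  # floordiv path: rounds toward -inf, cast to Int -> [3, -4]
```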
```diff
@@ -129,6 +129,8 @@ _radd: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: add(a, b)
 _rsub: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: sub(b, a)
 _rmul: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: multiply(a, b)
 _rtruediv: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: div(b, a)
+_floordiv: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: div(a, b, floor=True)
+_rfloordiv: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: div(b, a, floor=True)
 _rbitwise_and: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: _bitwise_and(b, a)
 _rbitwise_or: Callable[[Tensor, Tensor], Tensor] = lambda a, b, /: _bitwise_or(b, a)
 
```
```diff
@@ -137,10 +139,18 @@ def _pow(a: Tensor, /, exp: _Scalar) -> Tensor:
     return ufunc._pow(exp)(a)
 
 
+def _rpow(a: Tensor, /, base: _Scalar) -> Tensor:
+    return ufunc._rpow(base)(a)
+
+
 def _neg(a: Tensor, /) -> Tensor:
     return ufunc._neg()(a)
 
 
+def _invert(a: Tensor, /) -> Tensor:
+    return ufunc._invert()(a)
+
+
 def exp(a: Tensor, /) -> Tensor:
     return ufunc.exp()(a)
 
```
```diff
@@ -551,6 +561,8 @@ Tensor.__mul__ = multiply
 Tensor.__rmul__ = _rmul
 Tensor.__truediv__ = div
 Tensor.__rtruediv__ = _rtruediv
+Tensor.__floordiv__ = _floordiv
+Tensor.__rfloordiv__ = _rfloordiv
 Tensor.__matmul__ = matmul
 
 Tensor.__eq__ = _equal
```
```diff
@@ -561,7 +573,9 @@ Tensor.__lt__ = _less
 Tensor.__le__ = _less_or_equal
 
 Tensor.__pow__ = _pow
+Tensor.__rpow__ = _rpow
 Tensor.__neg__ = _neg
+Tensor.__invert__ = _invert
 
 Tensor.__and__ = _bitwise_and
 Tensor.__rand__ = _rbitwise_and
```
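Together these assignments expose every operation added in this release through Python's operator protocol: `//` and its reflected form, exponentiation with a tensor exponent via `__rpow__`, and bitwise NOT via `__invert__`. A brief sketch of what each expression now dispatches to:

```python
from lucid import Tensor

t = Tensor([1.0, 2.0, 3.0])

t // 2        # __floordiv__  -> div(t, 2, floor=True)
9 // t        # __rfloordiv__ -> div(9, t, floor=True)
2 ** t        # __rpow__      -> ufunc._rpow(2)(t), differentiable in t
mask = t < 2  # boolean tensor via __lt__
~mask         # __invert__    -> ufunc._invert()(mask), gradient-free
```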
```diff
--- lucid_dl-2.5.5/lucid/_func/bfunc.py
+++ lucid_dl-2.8.4/lucid/_func/bfunc.py
@@ -108,6 +108,27 @@ class truediv(operation):
         return _broadcast_flops(a, b)
 
 
+class floordiv(operation):
+    def __init__(self) -> None:
+        super().__init__()
+
+    @binary_func_op(has_gradient=False)
+    def cpu(self, a: Tensor, b: Tensor) -> _FuncOpReturnType:
+        self.result = Tensor(a.data // b.data).astype(lucid.Int)
+        return self.result, partial(self.__grad__, lib_=np)
+
+    @binary_func_op(has_gradient=False, device="gpu")
+    def gpu(self, a: Tensor, b: Tensor) -> _FuncOpReturnType:
+        self.result = Tensor(a.data // b.data).astype(lucid.Int)
+        return self.result, partial(self.__grad__, lib_=mx)
+
+    def __grad__(self, lib_: ModuleType) -> _GradFuncType:
+        return lib_.array(0.0), lib_.array(0.0)
+
+    def __flops__(self, a: Tensor, b: Tensor) -> int:
+        return _broadcast_flops(a, b)
+
+
 class _equal(operation):
     def __init__(self) -> None:
         super().__init__()
```
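`floordiv` is registered with `has_gradient=False` because floor division is piecewise constant: its derivative is zero almost everywhere, so `__grad__` returns a zero for each operand. A plain-NumPy sketch of the forward/backward contract this class implements (using `int64` to stand in for `lucid.Int`, which is an assumption):

```python
import numpy as np

def floordiv_forward(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # Elementwise floor division, cast to an integer dtype like .astype(lucid.Int)
    return (a // b).astype(np.int64)

def floordiv_backward() -> tuple[np.ndarray, np.ndarray]:
    # The output is a step function of its inputs, so both incoming
    # gradients are zero -- this mirrors __grad__ above.
    return np.array(0.0), np.array(0.0)

print(floordiv_forward(np.array([7.0, -7.0]), np.array([2.0, 2.0])))  # [ 3 -4]
```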
```diff
--- lucid_dl-2.5.5/lucid/_func/ufunc.py
+++ lucid_dl-2.8.4/lucid/_func/ufunc.py
@@ -38,6 +38,28 @@ class _pow(operation):
         return 11 * a.size
 
 
+class _rpow(operation):
+    def __init__(self, base: _Scalar) -> None:
+        super().__init__()
+        self.base = base
+
+    @unary_func_op()
+    def cpu(self, a: Tensor) -> _FuncOpReturnType:
+        self.result = Tensor(self.base**a.data)
+        return self.result, partial(self.__grad__, a=a)
+
+    @unary_func_op(device="gpu")
+    def gpu(self, a: Tensor) -> _FuncOpReturnType:
+        self.result = Tensor(self.base**a.data)
+        return self.result, partial(self.__grad__, a=a)
+
+    def __grad__(self, a: Tensor) -> _GradFuncType:
+        return (math.log(self.base) * self.base**a.data) * self.result.grad
+
+    def __flops__(self, a: Tensor) -> int:
+        return 11 * a.size
+
+
 class _neg(operation):
     def __init__(self) -> None:
         super().__init__()
```
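`_rpow.__grad__` applies the identity d/dx bˣ = ln(b)·bˣ, scaled by the upstream gradient. A quick NumPy check of that formula against a central finite difference:

```python
import math
import numpy as np

base = 3.0
x = np.array([0.5, 1.0, 2.0])

analytic = math.log(base) * base**x                        # expression in __grad__
eps = 1e-6
numeric = (base**(x + eps) - base**(x - eps)) / (2 * eps)  # central difference

assert np.allclose(analytic, numeric, rtol=1e-5)
```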
```diff
@@ -59,6 +81,24 @@ class _neg(operation):
         return a.size
 
 
+class _invert(operation):
+    def __init__(self) -> None:
+        super().__init__()
+
+    @unary_func_op(has_gradient=False)
+    def cpu(self, a: Tensor) -> _FuncOpReturnType:
+        self.result = Tensor(~a.data)
+        return self.result, partial(self.__grad__, lib_=np)
+
+    @unary_func_op(has_gradient=False, device="gpu")
+    def gpu(self, a: Tensor) -> _FuncOpReturnType:
+        self.result = Tensor(mx.bitwise_invert(a.data))
+        return self.result, partial(self.__grad__, lib_=mx)
+
+    def __grad__(self, lib_: ModuleType) -> _GradFuncType:
+        return lib_.array(0.0)
+
+
 class exp(operation):
     def __init__(self) -> None:
         super().__init__()
```
```diff
--- lucid_dl-2.5.5/lucid/_tensor/tensor.py
+++ lucid_dl-2.8.4/lucid/_tensor/tensor.py
@@ -15,7 +15,7 @@ from lucid.types import (
     Numeric,
 )
 
-from lucid._tensor.tensor_ops import _TensorOps
+from lucid._tensor.tensor_ops import _TensorBase
 from lucid._backend.metal import mx, parse_mlx_indexing, check_metal_availability
 
 
```
```diff
@@ -27,7 +27,7 @@ _dtype_map = {int: types.Int64, float: types.Float64, complex: types.Complex64}
 def _noop() -> None: ...
 
 
-class Tensor(_TensorOps):
+class Tensor(_TensorBase):
     def __init__(
         self,
         data: _ArrayOrScalar | _MLXArray,
```
```diff
@@ -397,28 +397,33 @@ class Tensor(_TensorOps):
         return hash(id(self))
 
     def __deepcopy__(self, *args: Any) -> Self:
+        cls = self.__class__
         copied_data = Tensor.copy_data(self.data)
 
-
-
-
-
-
-            device=self.device
-
+        if cls is Tensor:
+            new = Tensor(
+                copied_data, self.requires_grad, self.keep_grad, self.dtype, self.device
+            )
+        else:
+            base = Tensor(copied_data, dtype=self.dtype, device=self.device)
+            new = cls(base)
 
-        if self.grad is not None
-
+        if self.grad is not None and (
+            self.keep_grad or getattr(new, "keep_grad", False)
+        ):
+            new.grad = Tensor.copy_grad(self.grad)
+        else:
+            new.grad = None
 
-
-
-
-
+        new._op = None
+        new._backward_op = _noop
+        new._prev = []
+        new._backward_hooks = []
 
-
-
+        new._is_free = self._is_free
+        new._is_bool_tensor = self._is_bool_tensor
 
-        return
+        return new
 
     def __bool__(self) -> bool:
         if self.data.size != 1:
```
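The rewritten `__deepcopy__` copies the data (and the gradient when `keep_grad` asks for it) but deliberately severs the copy from the autograd graph by resetting `_op`, `_backward_op`, `_prev`, and `_backward_hooks`; subclasses are rebuilt by wrapping a fresh base `Tensor`. A short sketch of the observable behavior, relying on the attributes shown above (the `requires_grad` keyword name is assumed from the positional constructor call):

```python
import copy
from lucid import Tensor

a = Tensor([1.0, 2.0], requires_grad=True)
b = copy.deepcopy(a)

assert b is not a
assert (b.data == a.data).all()  # same values...
assert b._prev == []             # ...but detached from any graph history
assert b.grad is None            # grad is dropped unless keep_grad is set
```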
```diff
@@ -428,16 +433,20 @@ class Tensor(_TensorOps):
         )
         return bool(self.data.item())
 
-    def any(self) -> bool:
+    def any(self, axis: int | None = None, keepdims: bool = False) -> bool | Self:
         if self.is_cpu():
-
+            result = np.any(self.data, axis=axis, keepdims=keepdims)
+            return bool(result) if axis is None else Tensor(result, device="cpu")
         else:
             mx.eval(self.data)
-
+            result = mx.any(self.data, axis=axis, keepdims=keepdims)
+            return bool(result.item()) if axis is None else Tensor(result, device="gpu")
 
-    def all(self) -> bool:
+    def all(self, axis=None, keepdims=False) -> bool | Self:
         if self.is_cpu():
-
+            result = np.all(self.data, axis=axis, keepdims=keepdims)
+            return bool(result) if axis is None else Tensor(result, device="cpu")
         else:
             mx.eval(self.data)
-
+            result = mx.all(self.data, axis=axis, keepdims=keepdims)
+            return bool(result.item()) if axis is None else Tensor(result, device="gpu")
```
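`any` and `all` now mirror NumPy's reduction convention: a full reduction returns a plain `bool`, while passing `axis` returns a reduced `Tensor` (optionally keeping the reduced dimension). A small sketch:

```python
from lucid import Tensor

m = Tensor([[0.0, 1.0],
            [0.0, 0.0]])

m.any()                       # True  -- plain bool, full reduction
m.any(axis=0)                 # Tensor([False, True]), reduced over rows
m.all(axis=1, keepdims=True)  # Tensor of shape (2, 1)
```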
```diff
--- lucid_dl-2.5.5/lucid/_tensor/tensor_ops.py
+++ lucid_dl-2.8.4/lucid/_tensor/tensor_ops.py
@@ -3,7 +3,7 @@ from typing import Self, Sequence
 from lucid.types import _Scalar, _ArrayOrScalar, _ShapeLike, _ArrayLikeInt
 
 
-class _TensorOps:
+class _TensorBase:
     def __add__(self, other: Self | _ArrayOrScalar) -> Self: ...
 
     def __radd__(self, other: Self | _ArrayOrScalar) -> Self: ...
```
```diff
@@ -20,6 +20,10 @@ class _TensorOps:
 
     def __rtruediv__(self, other: Self | _ArrayOrScalar) -> Self: ...
 
+    def __floordiv__(self, other: Self | _ArrayOrScalar) -> Self: ...
+
+    def __rfloordiv__(self, other: Self | _ArrayOrScalar) -> Self: ...
+
     def __matmul__(self, other: Self | _ArrayOrScalar) -> Self: ...
 
     def __eq__(self, other: Self | _ArrayOrScalar) -> Self: ...
```
```diff
@@ -36,8 +40,12 @@ class _TensorOps:
 
     def __pow__(self, _: _Scalar) -> Self: ...
 
+    def __rpow__(self, other: Self | _ArrayOrScalar) -> Self: ...
+
     def __neg__(self) -> Self: ...
 
+    def __invert__(self) -> Self: ...
+
     def __and__(self, other: Self | _ArrayOrScalar) -> Self: ...
 
     def __rand__(self, other: Self | _ArrayOrScalar) -> Self: ...
```
```diff
--- lucid_dl-2.5.5/lucid/_util/func.py
+++ lucid_dl-2.8.4/lucid/_util/func.py
@@ -919,7 +919,7 @@ class argmax(operation):
 
     @unary_func_op(has_gradient=False, device="gpu")
     def gpu(self, a: Tensor) -> _FuncOpReturnType:
-        axis = self.
+        axis = self.axis if self.axis is not None else 0
         indices = mx.argmax(a.data, axis=axis)
         if self.keepdims:
             indices = mx.expand_dims(indices, axis)
```
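One behavioral note: NumPy's `argmax` with `axis=None` reduces over the flattened array, whereas this GPU path substitutes axis 0 when `self.axis` is `None`. The difference, illustrated in plain NumPy:

```python
import numpy as np

a = np.array([[1, 9],
              [8, 2]])

np.argmax(a)          # 1 -- position in the flattened array [1, 9, 8, 2]
np.argmax(a, axis=0)  # [1, 0] -- what the axis-0 fallback computes instead
```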
```diff
--- lucid_dl-2.5.5/lucid/data/_base.py
+++ lucid_dl-2.8.4/lucid/data/_base.py
@@ -1,15 +1,16 @@
 from abc import ABC, abstractmethod
-from typing import Callable, Self, Any
+from typing import Callable, Iterator, Self, Any, override
 import random
 import math
 
 import lucid
 from lucid._tensor import Tensor
+from lucid.types import _ArrayLike, _IndexLike, _DeviceType
 
 
 class Dataset(ABC):
     @abstractmethod
-    def __getitem__(self,
+    def __getitem__(self, idx: _IndexLike) -> None:
         raise NotImplementedError("Subclasses must implement __getitem__.")
 
     @abstractmethod
```
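With `__getitem__` annotated here and a default `__iter__` added on the base class in the next hunk, a `Dataset` subclass only has to provide `__getitem__` and `__len__`. A minimal sketch, assuming `Dataset` is re-exported from `lucid.data` (its rebuilt `__init__.py` is not shown in this diff):

```python
from lucid.data import Dataset  # assumed re-export; _base is the defining module

class SquaresDataset(Dataset):
    # Minimal subclass: __getitem__ + __len__ fulfill the abstract contract.
    def __init__(self, n: int) -> None:
        self.n = n

    def __getitem__(self, idx: int) -> int:
        return idx * idx

    def __len__(self) -> int:
        return self.n

for v in SquaresDataset(4):  # uses the default __iter__ added below
    print(v)                 # 0, 1, 4, 9
```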
```diff
@@ -19,6 +20,92 @@ class Dataset(ABC):
     def __add__(self, other: Self) -> Self:
         return ConcatDataset([self, other])
 
+    def __iter__(self) -> Iterator[Any]:
+        for i in range(len(self)):
+            yield self[i]
+
+    def __repr__(self) -> str:
+        return f"Dataset(n={len(self)})"
+
+
+class Subset(Dataset):
+    def __init__(self, dataset: Dataset, indices: list[int]) -> None:
+        super().__init__()
+        self.dataset = dataset
+        self.indices = indices
+
+    def __getitem__(self, idx: _IndexLike) -> Any:
+        return self.dataset[self.indices[idx]]
+
+    def __len__(self) -> int:
+        return len(self.indices)
+
+    @override
+    def __iter__(self) -> Iterator[Any]:
+        for i in self.indices:
+            yield self.dataset[i]
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self.dataset, name)
+
+    def __repr__(self) -> str:
+        return f"Subset(n={len(self)})"
+
+
+class TensorDataset(Dataset):
+    def __init__(self, *tensors_or_arrays: Tensor | _ArrayLike) -> None:
+        super().__init__()
+        if len(tensors_or_arrays) == 0:
+            raise ValueError(
+                "TensorDataset requires at least one tensor/array-like object."
+            )
+        try:
+            self._tensors: tuple[Tensor, ...] = tuple(
+                lucid._check_is_tensor(t) for t in tensors_or_arrays
+            )
+        except Exception as e:
+            raise RuntimeError(
+                "Failed to convert array-like object(s) to tensor."
+            ) from e
+
+        n0 = len(self._tensors[0])
+        for i, t in enumerate(self._tensors):
+            if t.ndim == 0 or len(t) == 0:
+                raise RuntimeError(
+                    "All tensors must be at least 1D. "
+                    f"Tensor at index {i} has no length."
+                )
+            if len(t) != n0:
+                raise ValueError(
+                    "All tensors must have the same length along dim 0: "
+                    f"got {n0} and {len(t)} at index {i}."
+                )
+
+    def __len__(self) -> int:
+        return len(self._tensors[0])
+
+    @override
+    def __getitem__(self, idx: _IndexLike | Tensor) -> tuple[Tensor, ...]:
+        return tuple(t[idx] for t in self._tensors)
+
+    def to(self, device: _DeviceType) -> Self:
+        self._tensors = tuple(t.to(device) for t in self._tensors)
+        return self
+
+    @property
+    def tensors(self) -> tuple[Tensor, ...]:
+        return self._tensors
+
+    @override
+    def __iter__(self) -> Iterator[tuple[Tensor, ...]]:
+        return super().__iter__()
+
+    @override
+    def __repr__(self) -> str:
+        shapes = ", ".join(str(t.shape) for t in self._tensors)
+        devices = {t.device for t in self._tensors}
+        return f"TensorDataset(n={len(self)}, shapes=({shapes}), devices={devices})"
+
 
 class ConcatDataset(Dataset):
     def __init__(self, datasets: list[Dataset]) -> None:
```
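`TensorDataset` zips several equal-length tensors so each index yields one aligned tuple of fields, while `Subset` is a non-copying view of another dataset through an index list (and forwards unknown attributes to it via `__getattr__`). A sketch combining the two, importing from the private `_base` module named in this diff since the public re-exports are not shown:

```python
from lucid import Tensor
from lucid.data._base import Subset, TensorDataset

x = Tensor([[1.0], [2.0], [3.0], [4.0]])  # 4 samples, 1 feature
y = Tensor([0.0, 1.0, 0.0, 1.0])          # 4 labels; lengths must match on dim 0

ds = TensorDataset(x, y)
xi, yi = ds[2]            # one aligned (sample, label) pair

val = Subset(ds, [1, 3])  # view over samples 1 and 3; nothing is copied
for xb, yb in val:        # Subset.__iter__ walks its own index list
    print(xb, yb)
```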
```diff
@@ -38,7 +125,7 @@ class ConcatDataset(Dataset):
     def __len__(self) -> int:
         return self.cumulative_sizes[-1] if self.cumulative_sizes else 0
 
-    def __getitem__(self, idx:
+    def __getitem__(self, idx: _IndexLike) -> Any:
         if idx < 0:
             if -idx > len(self):
                 raise IndexError("Index out of range.")
```
```diff
@@ -101,6 +188,7 @@ class DataLoader:
         if isinstance(batch[0], (tuple, list)):
             transposed = list(zip(*batch))
             return tuple(lucid.stack(tuple(x), axis=0) for x in transposed)
+
         elif isinstance(batch[0], Tensor):
             return lucid.stack(tuple(batch), axis=0)
         else:
```
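The collate branch above handles tuple/list samples, which is exactly what `TensorDataset` yields, by transposing the batch so each field is stacked separately along a new leading axis. The core transpose-then-stack pattern in isolation:

```python
# A batch of (x, y) samples, as a TensorDataset would yield them:
batch = [(1, 10), (2, 20), (3, 30)]

transposed = list(zip(*batch))  # [(1, 2, 3), (10, 20, 30)]
# Each inner tuple is then lucid.stack()-ed along axis 0, giving one
# batched tensor per field.
```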
```diff
--- /dev/null
+++ lucid_dl-2.8.4/lucid/data/_util.py
@@ -0,0 +1,70 @@
+from typing import Sequence
+import random
+import math
+
+import lucid
+
+from ._base import Dataset, Subset
+
+
+__all__ = ["random_split"]
+
+
+def _resolve_lengths_from_fractions(fractions: Sequence[float], n: int) -> list[int]:
+    if not fractions:
+        raise ValueError("fractions must be non-empty.")
+    if any(f < 0 for f in fractions):
+        raise ValueError("Fractional lengths must be non-negative.")
+
+    s = sum(fractions)
+    if not math.isclose(s, 1.0, rel_tol=1e-6, abs_tol=1e-6):
+        raise ValueError(f"When passing fractions, they must sum to 1.0 (got {s}).")
+
+    base = [int(math.floor(f * n)) for f in fractions]
+    remainder = n - sum(base)
+    for i in range(remainder):
+        base[i % len(base)] += 1
+
+    return base
+
+
+def random_split(
+    dataset: Dataset, lengths: Sequence[int | float], seed: int | None = None
+) -> tuple[Subset, ...]:
+    n = len(dataset)
+    if not lengths:
+        raise ValueError("lengths must be non-empty.")
+
+    all_int = all(isinstance(l, int) for l in lengths)
+    all_float = all(isinstance(l, float) for l in lengths)
+
+    if not (all_int or all_float):
+        raise TypeError("lengths must be all integers or all floats.")
+
+    if all_float:
+        int_lengths = _resolve_lengths_from_fractions(lengths, n)
+    else:
+        int_lengths = list(lengths)
+    s = sum(int_lengths)
+    if s != n:
+        raise ValueError(
+            f"Sum of input lengths ({s}) does not equal dataset length ({n})."
+        )
+    if any(l < 0 for l in int_lengths):
+        raise ValueError("All split lengths must be non-negative.")
+
+    if seed is None:
+        seed = lucid.random.get_seed()
+    rng = random.Random(seed)
+
+    indices = list(range(n))
+    rng.shuffle(indices)
+
+    splits: list[Subset] = []
+    offset = 0
+    for length in int_lengths:
+        split_idx = indices[offset : offset + length]
+        splits.append(Subset(dataset, split_idx))
+        offset += length
+
+    return tuple(splits)
```
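When fractions are given, each split receives the floor of its share and the remainder is handed out round-robin, so the splits always partition the dataset exactly. A usage sketch, again importing from the private modules named in this diff since the public re-exports are not shown:

```python
from lucid import Tensor
from lucid.data._base import TensorDataset
from lucid.data._util import random_split

ds = TensorDataset(Tensor([float(i) for i in range(10)]))
train, test = random_split(ds, [0.8, 0.2], seed=42)

assert len(train) == 8 and len(test) == 2
assert set(train.indices).isdisjoint(test.indices)  # disjoint index partition
```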