lucid-dl 2.10.0__py3-none-any.whl → 2.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucid/__init__.py CHANGED
@@ -49,6 +49,7 @@ import lucid.random as random
 import lucid.einops as einops
 import lucid.nn as nn
 import lucid.types as types
+import lucid.autograd as autograd
 
 from lucid._fusion import ENABLE_FUSION
 
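With the root package re-exporting the engine, `lucid.autograd` is reachable as an attribute of `lucid` as well as via a direct import. A minimal sanity check, assuming nothing beyond the import added above:

```python
import lucid
import lucid.autograd as autograd

# The `import lucid.autograd as autograd` added to lucid/__init__.py binds the
# submodule as an attribute of the package, so both access paths agree.
assert lucid.autograd is autograd
```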
lucid/_tensor/tensor.py CHANGED
@@ -140,110 +140,9 @@ class Tensor(_TensorBase):
         return self
 
     def backward(self, retain_grad: bool = False, retain_graph: bool = False) -> None:
-        if self.grad is None:
-            self.grad = (
-                np.ones_like(self.data) if self.is_cpu() else mx.ones_like(self.data)
-            )
-        visited = set()
-        topo_order: list[Self] = []
-        stack = [self]
-        ops_to_clear = set()
-
-        while stack:
-            tensor = stack[-1]
-            if tensor in visited:
-                stack.pop()
-                topo_order.append(tensor)
-                continue
-
-            visited.add(tensor)
-            for parent in tensor._prev:
-                if parent not in visited:
-                    stack.append(parent)
-
-        if lucid.ENABLE_FUSION and self.is_cpu():
-            self._try_backward_fusion(topo_order)
-
-        for tensor in reversed(topo_order):
-            try:
-                tensor._backward_op()
-            except Exception as e:
-                raise lucid.BackwardError(shape=tensor.shape, op=tensor._op) from e
-
-            for hook in tensor._backward_hooks:
-                hook(tensor, tensor.grad)
-
-            if tensor._op is not None:
-                ops_to_clear.add(tensor._op)
-
-            if not (tensor.is_leaf or retain_grad or tensor.keep_grad):
-                tensor.grad = None
-
-        if not retain_graph:
-            for tensor in topo_order:
-                tensor.clear_node()
-            for op in ops_to_clear:
-                try:
-                    op.clear()
-                except Exception:
-                    try:
-                        op.result = None
-                    except Exception:
-                        pass
-
-    def _try_backward_fusion(self, topo_order: list[Self]) -> None:
-        consumer_of: dict[int, Self] = {}
-        multi_consumer: set[int] = set()
-
-        for consumer in topo_order:
-            for parent in consumer._prev:
-                pid = id(parent)
-                if pid in multi_consumer:
-                    continue
-
-                prev_consumer = consumer_of.get(pid)
-                if prev_consumer is None:
-                    consumer_of[pid] = consumer
-                else:
-                    multi_consumer.add(pid)
-                    consumer_of.pop(pid, None)
-
-        if not consumer_of:
-            return
-
-        from lucid._fusion import match_fusion_table
-
-        for pid, v in list(consumer_of.items()):
-            p = next((t for t in v._prev if id(t) == pid), None)
-            if p is None:
-                continue
-            if p._op is None or v._op is None:
-                continue
-
-            fused_backward_op = match_fusion_table(p._op, v._op)
-            if fused_backward_op is None:
-                continue
-            if v.size < fused_backward_op.heuristic_thresh:
-                continue
-
-            # NOTE (fusion limitation):
-            # Only fuse simple unary chains p -> v.
-            # If v has multiple parents (e.g., binary ops), a fused grad func would
-            # need to account for all inputs; skip for now.
-            if len(v._prev) != 1 or v._prev[0] is not p:
-                continue
-
-            p_parents = tuple(p._prev)
-            v._prev.remove(p)
-            v._prev.extend(p_parents)
-            p.clear_node(clear_op=False)
-
-            v._backward_op.override_tensor_refs(tuple(weakref.ref(t) for t in v._prev))
-            v._backward_op.override_grad_func(
-                fused_backward_op.get_fused_grad_func(
-                    inputs=p_parents, results=v, device=v.device
-                )
-            )
+        import lucid.autograd as autograd
+
+        autograd.backward(self, retain_grad=retain_grad, retain_graph=retain_graph)
 
     def register_hook(self, hook: _HookType) -> Callable:
         self._backward_hooks.append(hook)
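`Tensor.backward` is now a thin shim over the shared engine, so the method call and the free function walk the same graph. A short sketch of the equivalence, reusing only the tensor operations shown in the README snippet further below (`**`, `.sum()`):

```python
import lucid
import lucid.autograd as autograd

x = lucid.Tensor([3.0, -1.0], requires_grad=True)

# Method form: delegates to autograd.backward(self, ...) per the change above.
y = (x ** 2).sum()
y.backward()
dx_method = x.grad

# Free-function form on a freshly built graph: same traversal, same result.
x.grad = None
y2 = (x ** 2).sum()
autograd.backward(y2, retain_grad=False, retain_graph=False)
assert (x.grad == dx_method).all()
```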
lucid/autograd/__init__.py ADDED
@@ -0,0 +1,219 @@
+from typing import Iterable, Sequence
+import weakref
+
+import numpy as np
+
+from lucid._backend.metal import mx
+from lucid.error import BackwardError
+from lucid.types import _MLXArray, _NumPyArray, _TensorLike, _Scalar, _Gradient
+
+
+__all__ = ["grad", "backward"]
+
+
+def _as_tuple(value: _TensorLike | Sequence[_TensorLike]) -> tuple[_TensorLike, ...]:
+    if isinstance(value, (tuple, list)):
+        return tuple(value)
+    return (value,)
+
+
+def _coerce_grad_output(
+    output: _TensorLike, grad_output: _TensorLike | _Gradient | _Scalar
+) -> _Gradient:
+    if grad_output is None:
+        if output.is_cpu():
+            return np.ones_like(output.data)
+        return mx.ones_like(output.data)
+
+    if isinstance(grad_output, _TensorLike):
+        grad_output = grad_output.data
+
+    if isinstance(grad_output, _NumPyArray):
+        if output.is_gpu():
+            grad_output = mx.array(grad_output)
+        return grad_output
+
+    if isinstance(grad_output, _MLXArray):
+        if output.is_cpu():
+            grad_output = np.array(grad_output)
+        return grad_output
+
+    if output.is_cpu():
+        return np.ones_like(output.data) * grad_output
+    return mx.ones_like(output.data) * grad_output
+
+
+def grad(
+    outputs: _TensorLike | Iterable[_TensorLike],
+    inputs: _TensorLike | Iterable[_TensorLike],
+    grad_outputs: _TensorLike | Iterable[_TensorLike] | Iterable[_Scalar] | None = None,
+    retain_graph: bool = False,
+    allow_unused: bool = False,
+) -> tuple[_Gradient, ...] | _Gradient:
+    out_tensors = _as_tuple(outputs)
+    in_tensors = _as_tuple(inputs)
+
+    if grad_outputs is None:
+        grad_outs = (None,) * len(out_tensors)
+    else:
+        grad_outs = _as_tuple(grad_outputs)
+        if len(grad_outs) != len(out_tensors):
+            raise ValueError("grad_outputs length must match outputs length.")
+
+    for tensor in out_tensors:
+        if not isinstance(tensor, _TensorLike):
+            raise TypeError("All outputs must be _TensorLike instances.")
+
+    for tensor in in_tensors:
+        if not isinstance(tensor, _TensorLike):
+            raise TypeError("All inputs must be _TensorLike instances.")
+
+    prev_grads = {tensor: tensor.grad for tensor in in_tensors}
+    prev_out_grads = {tensor: tensor.grad for tensor in out_tensors}
+    prev_keep = {tensor: tensor.keep_grad for tensor in in_tensors}
+
+    for tensor in in_tensors:
+        tensor.grad = None
+        tensor.keep_grad = True
+
+    try:
+        for i, (output, grad_output) in enumerate(zip(out_tensors, grad_outs)):
+            if not output.requires_grad:
+                if allow_unused:
+                    continue
+                raise RuntimeError("All outputs must require gradients.")
+
+            coerced = _coerce_grad_output(output, grad_output)
+            if coerced.shape != output.shape:
+                raise ValueError("grad_output shape must match output shape.")
+
+            output.grad = coerced
+            output.backward(
+                retain_grad=True,
+                retain_graph=(i < len(out_tensors) - 1) or retain_graph,
+            )
+
+        grads = tuple(tensor.grad for tensor in in_tensors)
+        if not allow_unused and any(grad is None for grad in grads):
+            raise RuntimeError(
+                "Some inputs did not receive gradients. Set allow_unused=True."
+            )
+
+        if len(grads) == 1:
+            return grads[0]
+        return grads
+    finally:
+        for tensor, grad in prev_out_grads.items():
+            tensor.grad = grad
+        for tensor in in_tensors:
+            tensor.grad = prev_grads[tensor]
+            tensor.keep_grad = prev_keep[tensor]
+
+
+def backward(
+    tensor: _TensorLike, retain_grad: bool = False, retain_graph: bool = False
+) -> None:
+    if tensor.grad is None:
+        tensor.grad = (
+            np.ones_like(tensor.data) if tensor.is_cpu() else mx.ones_like(tensor.data)
+        )
+
+    visited = set()
+    topo_order: list[_TensorLike] = []
+    stack = [tensor]
+    ops_to_clear = set()
+
+    while stack:
+        node = stack[-1]
+        if node in visited:
+            stack.pop()
+            topo_order.append(node)
+            continue
+
+        visited.add(node)
+        for parent in node._prev:
+            if parent not in visited:
+                stack.append(parent)
+
+    from lucid._fusion import ENABLE_FUSION
+
+    if ENABLE_FUSION and tensor.is_cpu():
+        _try_backward_fusion(topo_order)
+
+    for node in reversed(topo_order):
+        try:
+            node._backward_op()
+        except Exception as e:
+            raise BackwardError(shape=node.shape, op=node._op) from e
+
+        for hook in node._backward_hooks:
+            hook(node, node.grad)
+
+        if node._op is not None:
+            ops_to_clear.add(node._op)
+
+        if not (node.is_leaf or retain_grad or node.keep_grad):
+            node.grad = None
+
+    if not retain_graph:
+        for node in topo_order:
+            node.clear_node()
+        for op in ops_to_clear:
+            try:
+                op.clear()
+            except Exception:
+                try:
+                    op.result = None
+                except Exception:
+                    pass
+
+
+def _try_backward_fusion(topo_order: list[_TensorLike]) -> None:
+    from lucid._fusion import match_fusion_table
+
+    consumer_of: dict[int, _TensorLike] = {}
+    multi_consumer: set[int] = set()
+
+    for consumer in topo_order:
+        for parent in consumer._prev:
+            pid = id(parent)
+            if pid in multi_consumer:
+                continue
+
+            prev_consumer = consumer_of.get(pid)
+            if prev_consumer is None:
+                consumer_of[pid] = consumer
+            else:
+                multi_consumer.add(pid)
+                consumer_of.pop(pid, None)
+
+    if not consumer_of:
+        return
+
+    for pid, v in list(consumer_of.items()):
+        p = next((t for t in v._prev if id(t) == pid), None)
+        if p is None:
+            continue
+        if p._op is None or v._op is None:
+            continue
+
+        fused_backward_op = match_fusion_table(p._op, v._op)
+        if fused_backward_op is None:
+            continue
+        if v.size < fused_backward_op.heuristic_thresh:
+            continue
+
+        if len(v._prev) != 1 or v._prev[0] is not p:
+            continue
+
+        p_parents = tuple(p._prev)
+        v._prev.remove(p)
+        v._prev.extend(p_parents)
+        p.clear_node(clear_op=False)
+
+        v._backward_op.override_tensor_refs(tuple(weakref.ref(t) for t in v._prev))
+        v._backward_op.override_grad_func(
+            fused_backward_op.get_fused_grad_func(
+                inputs=p_parents, results=v, device=v.device
+            )
+        )
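The functional `grad` entry point accepts a single tensor or a sequence for both `outputs` and `inputs`, an optional `grad_outputs` seed, `retain_graph`, and an `allow_unused` escape hatch, and it restores any pre-existing `.grad` values before returning. A usage sketch built only from the signatures above and the tensor ops shown in the README snippet:

```python
import lucid
import lucid.autograd as autograd

x = lucid.Tensor([1.0, 2.0], requires_grad=True)
w = lucid.Tensor([5.0, 7.0], requires_grad=True)
y1 = (x ** 2).sum()
y2 = (w ** 2).sum()

# Several outputs and inputs at once; gradients come back in input order.
dx, dw = autograd.grad((y1, y2), (x, w), retain_graph=True)

# Seed the backward pass with a custom upstream gradient; a scalar is
# broadcast by _coerce_grad_output to ones_like(y1) * 2.0.
dx_scaled = autograd.grad(y1, x, grad_outputs=2.0, retain_graph=True)

# y1 does not depend on w, so w's slot comes back as None; without
# allow_unused=True this raises a RuntimeError instead.
dx_only, none_for_w = autograd.grad(y1, (x, w), allow_unused=True)
```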
lucid/types.py CHANGED
@@ -46,15 +46,30 @@ class _TensorLike(Protocol):
     device: _DeviceType
     shape: Any
     data: Any
+    grad: Any
+    keep_grad: bool
+    is_leaf: bool
+    size: Any
 
     _op: object | None
     _prev: list[_TensorLike]
     _backward_op: Any
+    _backward_hooks: Any
 
     def to(self, device: _DeviceType) -> None: ...
 
     def free(self) -> None: ...
 
+    def is_cpu(self) -> bool: ...
+
+    def is_gpu(self) -> bool: ...
+
+    def clear_node(self, clear_op: bool = True) -> None: ...
+
+    def backward(
+        self, retain_grad: bool = False, retain_graph: bool = False
+    ) -> None: ...
+
 
 class Numeric:
     def __init__(
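The `_TensorLike` protocol now spells out everything the stand-alone engine touches: gradient storage, the `keep_grad`/`is_leaf` flags, device queries, graph cleanup, and `backward` itself, so `lucid.autograd` can be typed against the protocol rather than the concrete `Tensor` class. A small sketch of what conformance buys a caller, with a hypothetical helper name:

```python
from lucid.types import _TensorLike

# Hypothetical utility: a type checker accepts anything that satisfies the
# widened protocol, whether a concrete lucid Tensor or another node type.
def zero_grads(tensors: list[_TensorLike]) -> None:
    for t in tensors:
        if not t.keep_grad:  # flag added to the protocol in this release
            t.grad = None    # gradient storage is part of the protocol, too
```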
lucid_dl-2.10.0.dist-info/METADATA → lucid_dl-2.11.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lucid-dl
-Version: 2.10.0
+Version: 2.11.0
 Summary: Lumerico's Comprehensive Interface for Deep Learning
 Home-page: https://github.com/ChanLumerico/lucid
 Author: ChanLumerico
@@ -28,9 +28,9 @@ Dynamic: summary
 
 # Lucid² 💎
 
-![PyPI - Version](https://img.shields.io/pypi/v/lucid-dl?color=red)
-![PyPI - Downloads](https://img.shields.io/pypi/dm/lucid-dl.svg)
-![PyPI - Total Downloads](https://img.shields.io/badge/total%20downloads-34.0k-yellow.svg)
+![PyPI Version](https://img.shields.io/pypi/v/lucid-dl?color=red)
+![PyPI Downloads](https://img.shields.io/pypi/dm/lucid-dl.svg)
+[![PyPI Total Downloads](https://static.pepy.tech/personalized-badge/lucid-dl?period=total&units=NONE&left_color=GRAY&right_color=yellow&left_text=total%20downloads)](https://pepy.tech/projects/lucid-dl)
 ![GitHub code size in bytes](https://img.shields.io/github/languages/code-size/ChanLumerico/lucid.svg)
 ![Code Style](https://img.shields.io/badge/code%20style-black-000000.svg)
 ![Lines of Code](https://img.shields.io/badge/lines%20of%20code-27.7k-purple.svg)
@@ -50,6 +50,17 @@ Whether you're a student, educator, or an advanced researcher seeking to demysti
 
 - Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
 
+- Branched a Stand-alone Autograd Engine as `lucid.autograd`
+
+  - Provides a generalized API for computing gradients:
+
+    ```python
+    import lucid.autograd as autograd
+    x = lucid.Tensor([1., 2.], requires_grad=True)
+    y = (x ** 2).sum()
+    autograd.grad(y, x)  # ∂y/∂x
+    ```
+
 - Introduced **Backward Fusion** for CPU execution:
   - Automatically fuses selected operation patterns during backpropagation to reduce graph overhead
   - Supports identity/unary fusion (e.g. `log∘exp`, double negation, and view-like ops such as reshape/squeeze)
lucid_dl-2.10.0.dist-info/RECORD → lucid_dl-2.11.0.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
-lucid/__init__.py,sha256=aqui9d7CMjxCcb0txpd68amluwuMwNTbTOxVuuS4N6Y,9055
+lucid/__init__.py,sha256=Rd4OF5hgSNFuSzeB4--gfkRrrQpSEhYJ10-vV93YShk,9089
 lucid/error.py,sha256=qnTiVuZm3c5-DIt-OOyobZ7RUm7E1K4NR0j998LG1ug,709
 lucid/port.py,sha256=Kt1YaSWef_eKF4KRj-UFhirvFC5urEESfYQ_BSlBZGE,3811
-lucid/types.py,sha256=n225G0mVm0im3r9jZi2YY0hMfhOgsJroRlHZiqT_Sn4,3981
+lucid/types.py,sha256=7mkE4_kAn6I8bToqoNb4E5IKwVOjYMQ_kMXZB7O_KeA,4310
 lucid/_backend/__init__.py,sha256=n1bnYdeb_bNDBKASWGywTRa0Ne9hMAkal3AuVZJgovI,5
 lucid/_backend/conv.py,sha256=EkU_AbNcSFJWwLzk7tY4OBaA5RUh44CgDcVGviH_AZA,16641
 lucid/_backend/core.py,sha256=TYuLbv5V-H3sxhgTjwUO2iPxJ6TIJjsYx8EugpUGJ2g,8531
@@ -15,10 +15,11 @@ lucid/_fusion/__init__.py,sha256=SVzLiFzs4m1mMOpefKDLFkYqV0zV5FGwFd9hEbUZtSo,68
 lucid/_fusion/base.py,sha256=d6nWuPjYxkie9Xrtbj3JVusnIN61PIoSFFSthJNm9os,3821
 lucid/_fusion/func.py,sha256=9tXzB-QNrx_AvNJiPto807faXKlzjuMG4o9gRgI5usc,1659
 lucid/_tensor/__init__.py,sha256=wFWAMhTnQwThNiBEIT4fcw4ryIm8A4AoR-m9KDhklOQ,40
-lucid/_tensor/tensor.py,sha256=uRz7bNveY2e5ET0kIvGP_Ml7n0ZPMsjETOQCET1JVZs,16939
+lucid/_tensor/tensor.py,sha256=kccMHLPi2aGpkjwzEb7SBpp3Hhbe1TZv3VUDUSOv2ik,13570
 lucid/_tensor/tensor_ops.py,sha256=5XtqcPbrkOb87pVTSOMNaje9K78tgLpsNdV8XXnbpHc,3991
 lucid/_util/__init__.py,sha256=NgOleItHJGVLdJlKHKfpzuSl3vofzJpNsZByHAYJmKs,6838
 lucid/_util/func.py,sha256=ZODVlGdAejMTJnwEls7yMCI5WJ9Thkb3RIq1WwQCS4E,44239
+lucid/autograd/__init__.py,sha256=uKuGXUUSF8rBx7iEuVA6a_AhY9wfkC82TPmc1AKbay4,6675
 lucid/data/__init__.py,sha256=qrDIQsnix5ZUEa0yrtomaaWbNJyJ3xEr2gdhRvg70_8,118
 lucid/data/_base.py,sha256=RM8xpBl8qFhm19n7eER_jOsRaxkL3rbOkwUvn6VetSE,5921
 lucid/data/_util.py,sha256=UsbliOrGmM0f1vqppoBPn3RSx53PIqcVx_yVOlHZB6A,1985
@@ -126,8 +127,8 @@ lucid/visual/__init__.py,sha256=6TuFDfmXTwpLyHl7_KqBfdzW6zqHjGzIFvymjFPlvjI,21
 lucid/visual/graph.py,sha256=YjpIDM_lloZARw3sCBiXPl_hT5A2gTk2fEHvwvJWXTk,4599
 lucid/weights/__init__.py,sha256=z1AikA3rOEeckWGkYWlcZkxNlJo9Xwa39PL6ly3hWnc,8801
 lucid/weights/__init__.pyi,sha256=lFonYC3cUx2Idolf3AEPnjFcyqcn3UDU84oJlZafqLY,3013
-lucid_dl-2.10.0.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
-lucid_dl-2.10.0.dist-info/METADATA,sha256=WpXt6J0ErHy2z-Kl0W7lKcEcJ5n6CNypk1HYv_TNIm8,11756
-lucid_dl-2.10.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lucid_dl-2.10.0.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
-lucid_dl-2.10.0.dist-info/RECORD,,
+lucid_dl-2.11.0.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
+lucid_dl-2.11.0.dist-info/METADATA,sha256=_wOx1Pjq_HgSvzL_4eMHGMsHz8cOwpvU9EpiFhYglco,12159
+lucid_dl-2.11.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lucid_dl-2.11.0.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
+lucid_dl-2.11.0.dist-info/RECORD,,