lucid-dl 2.7.7__py3-none-any.whl → 2.7.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucid/_tensor/tensor.py +21 -16
- lucid/optim/lr_scheduler/_schedulers.py +34 -0
- {lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/METADATA +9 -3
- {lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/RECORD +7 -7
- {lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/WHEEL +0 -0
- {lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/licenses/LICENSE +0 -0
- {lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/top_level.txt +0 -0
lucid/_tensor/tensor.py
CHANGED

@@ -397,28 +397,33 @@ class Tensor(_TensorBase):
         return hash(id(self))
 
     def __deepcopy__(self, *args: Any) -> Self:
+        cls = self.__class__
         copied_data = Tensor.copy_data(self.data)
 
-
-
-
-
-
-            device=self.device
-
+        if cls is Tensor:
+            new = Tensor(
+                copied_data, self.requires_grad, self.keep_grad, self.dtype, self.device
+            )
+        else:
+            base = Tensor(copied_data, dtype=self.dtype, device=self.device)
+            new = cls(base)
 
-        if self.grad is not None
-
+        if self.grad is not None and (
+            self.keep_grad or getattr(new, "keep_grad", False)
+        ):
+            new.grad = Tensor.copy_grad(self.grad)
+        else:
+            new.grad = None
 
-
-
-
-
+        new._op = None
+        new._backward_op = _noop
+        new._prev = []
+        new._backward_hooks = []
 
-
-
+        new._is_free = self._is_free
+        new._is_bool_tensor = self._is_bool_tensor
 
-        return
+        return new
 
     def __bool__(self) -> bool:
         if self.data.size != 1:
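A hedged illustration of the reworked `__deepcopy__` above: copies made via `copy.deepcopy` are detached from the autograd graph (`_op`, `_prev`, and hooks are reset), subclass tensors are rebuilt through their own class, and the gradient is copied only when `keep_grad` is set. The keyword form of the constructor (`requires_grad=...`) and the top-level `lucid.Tensor` export used below are assumptions, not something this diff confirms.

```python
import copy

import lucid  # assumes Tensor is re-exported at the package top level

# Assumed keyword form of the positional constructor call shown in the diff.
x = lucid.Tensor([1.0, 2.0, 3.0], requires_grad=True)

y = copy.deepcopy(x)

# The copy carries no graph state, and since keep_grad is False here the
# gradient is dropped (the `new.grad = None` branch above).
assert y.grad is None
assert y.requires_grad == x.requires_grad
```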
lucid/optim/lr_scheduler/_schedulers.py
CHANGED

@@ -13,6 +13,7 @@ __all__ = [
     "CosineAnnealingLR",
     "ReduceLROnPlateau",
     "CyclicLR",
+    "NoamScheduler",
 ]
 
 

@@ -265,3 +266,36 @@ class CyclicLR(LRScheduler):
             for _ in self.base_lrs
         ]
         return new_lrs
+
+
+class NoamScheduler(LRScheduler):
+    def __init__(
+        self,
+        optimizer: Optimizer,
+        model_size: int,
+        warmup_steps: int,
+        factor: float = 1.0,
+        last_epoch: int = -1,
+        verbose: bool = False,
+    ) -> None:
+        if model_size <= 0:
+            raise ValueError("model_size must be a positive integer.")
+        if warmup_steps <= 0:
+            raise ValueError("warmup_steps must be a positive integer.")
+        if factor <= 0:
+            raise ValueError("factor must be a positive float.")
+
+        self.model_size = model_size
+        self.warmup_steps = warmup_steps
+        self.factor = factor
+        super().__init__(optimizer, last_epoch, verbose)
+
+    def get_lr(self) -> list[float]:
+        step_num = max(self.last_epoch, 1)
+        scale = self.factor * (self.model_size**-0.5)
+
+        warmup_term = step_num * (self.warmup_steps**-1.5)
+        decay_term = step_num**-0.5
+        lr_factor = scale * min(decay_term, warmup_term)
+
+        return [base_lr * lr_factor for base_lr in self.base_lrs]
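The new `NoamScheduler` follows the Transformer-style ("Noam") schedule: the learning rate ramps up linearly for `warmup_steps` steps, then decays as the inverse square root of the step number, scaled by `factor * model_size ** -0.5`. A standalone sketch of the same factor, useful for sanity-checking values without touching an optimizer:

```python
def noam_lr(base_lr: float, step: int, model_size: int,
            warmup_steps: int, factor: float = 1.0) -> float:
    """Reproduces the factor computed in NoamScheduler.get_lr above."""
    step = max(step, 1)
    scale = factor * model_size ** -0.5
    return base_lr * scale * min(step ** -0.5, step * warmup_steps ** -1.5)


# Warmup phase: grows linearly with the step number.
print(noam_lr(1.0, step=100, model_size=512, warmup_steps=4000))
# Decay phase: falls off as step ** -0.5 after warmup.
print(noam_lr(1.0, step=8000, model_size=512, warmup_steps=4000))
```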
{lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lucid-dl
-Version: 2.7.7
+Version: 2.7.9
 Summary: Lumerico's Comprehensive Interface for Deep Learning
 Home-page: https://github.com/ChanLumerico/lucid
 Author: ChanLumerico

@@ -30,10 +30,10 @@ Dynamic: summary
 [README badge images: two badge lines updated; image content not preserved]

 **Lucid** is a minimalist deep learning framework built entirely from scratch in Python. It offers a pedagogically rich environment to explore the foundations of modern deep learning systems, including autodiff, neural network modules, and GPU acceleration — all while staying lightweight, readable, and free of complex dependencies.
 
@@ -46,6 +46,12 @@ Whether you're a student, educator, or an advanced researcher seeking to demysti
 
 - Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
 
+- Added multiple `nn` utilities:
+
+  - `nn.util.grad_norm` - Returns the global norm of the gradients
+  - `nn.util.clip_grad_norm` - Rescales the gradients based on the global norm
+  - `nn.util.clip_grad_value` - Rescales the gradients based on their values.
+
 - Implemented **EfficientDet**: `lucid.models.EfficientDet` with variants from `D0` to `D7`
 
 ## 🔧 How to Install
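The METADATA changelog above also lists three new gradient utilities under `nn.util`. Their lucid signatures are not shown in this diff, so rather than guess at them, here is a plain-NumPy sketch of the operations the three bullets describe (global gradient norm, rescaling by that norm, element-wise value clipping):

```python
import numpy as np

# Gradients of two hypothetical parameters.
grads = [np.array([3.0, 4.0]), np.array([1.0, -2.0])]

# Global norm: the L2 norm over all gradient elements (what grad_norm returns).
global_norm = float(np.sqrt(sum((g ** 2).sum() for g in grads)))

# Norm clipping: rescale every gradient by max_norm / global_norm when the
# global norm exceeds max_norm (the behaviour described for clip_grad_norm).
max_norm = 1.0
if global_norm > max_norm:
    grads = [g * (max_norm / global_norm) for g in grads]

# Value clipping: clamp each element into [-clip_value, clip_value]
# (the behaviour described for clip_grad_value).
clip_value = 0.5
grads = [np.clip(g, -clip_value, clip_value) for g in grads]
```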
{lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/RECORD
CHANGED

@@ -10,7 +10,7 @@ lucid/_func/bfunc.py,sha256=OEgtBWFL2S46UTtNU93ekYCe37Pj4oS8MEnJt-rcids,20571
 lucid/_func/gfunc.py,sha256=_gEJnYnAYhcN6aV1Z6ArIOGTgd0NEB2_eM86tE6uq-Q,4866
 lucid/_func/ufunc.py,sha256=OucEx6QdXzO2_y8Xie8Deh5IC7wnt2EIQcgCAJraWqs,30425
 lucid/_tensor/__init__.py,sha256=wFWAMhTnQwThNiBEIT4fcw4ryIm8A4AoR-m9KDhklOQ,40
-lucid/_tensor/tensor.py,sha256=
+lucid/_tensor/tensor.py,sha256=HSpV9XUpMI79gw1vlmuoGh7Bkl3qFR_4alOjoq4Lq70,13819
 lucid/_tensor/tensor_ops.py,sha256=5XtqcPbrkOb87pVTSOMNaje9K78tgLpsNdV8XXnbpHc,3991
 lucid/_util/__init__.py,sha256=NgOleItHJGVLdJlKHKfpzuSl3vofzJpNsZByHAYJmKs,6838
 lucid/_util/func.py,sha256=jPpjDw-pdK9bw2U3ACEtgCbV47j6Dp9ycbkvkHKefJo,44383

@@ -110,7 +110,7 @@ lucid/optim/prop.py,sha256=CbsWmoBb_g_8z16M3T6dMoSR9c72hm8M375IT1UHjpw,4740
 lucid/optim/sgd.py,sha256=DBZ1ZXQ9TfKZCRECfNRMDH9mvqUWCOPdY5TobnVxpz8,4477
 lucid/optim/lr_scheduler/__init__.py,sha256=kUoyN2g9nwTtEAqEVij832WSRvzEpKZywSJdfD7MQvY,58
 lucid/optim/lr_scheduler/_base.py,sha256=NNJnjwmJpsRXathrbLtH4tjfBHtwOiJ5HwF1_S6Ym5c,3092
-lucid/optim/lr_scheduler/_schedulers.py,sha256=
+lucid/optim/lr_scheduler/_schedulers.py,sha256=wxG6XvlTozz2TP57yXQL-krtSiO0hy2bySZq_sRDjh0,9227
 lucid/random/__init__.py,sha256=s8EAaKhEiTKT_vYjP4IFHx0xQVa1jqc_qIyvMauUu7M,2727
 lucid/random/_func.py,sha256=1Lu4m-ciEK037chNDGqv_j00RgGGzQ7UfslSfYActUk,2232
 lucid/transforms/__init__.py,sha256=DGznMbqhXdU9FLDMKnJawScO4HCqu40Sf_j4vJGJrjc,90

@@ -120,8 +120,8 @@ lucid/visual/__init__.py,sha256=6TuFDfmXTwpLyHl7_KqBfdzW6zqHjGzIFvymjFPlvjI,21
 lucid/visual/graph.py,sha256=YjpIDM_lloZARw3sCBiXPl_hT5A2gTk2fEHvwvJWXTk,4599
 lucid/weights/__init__.py,sha256=z1AikA3rOEeckWGkYWlcZkxNlJo9Xwa39PL6ly3hWnc,8801
 lucid/weights/__init__.pyi,sha256=lFonYC3cUx2Idolf3AEPnjFcyqcn3UDU84oJlZafqLY,3013
-lucid_dl-2.7.
-lucid_dl-2.7.
-lucid_dl-2.7.
-lucid_dl-2.7.
-lucid_dl-2.7.
+lucid_dl-2.7.9.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
+lucid_dl-2.7.9.dist-info/METADATA,sha256=-h9IwL5SzshStMIt4eYUB0rWJ7nTELdGWSD9FIIWM00,11519
+lucid_dl-2.7.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lucid_dl-2.7.9.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
+lucid_dl-2.7.9.dist-info/RECORD,,
{lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/WHEEL
File without changes

{lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/licenses/LICENSE
File without changes

{lucid_dl-2.7.7.dist-info → lucid_dl-2.7.9.dist-info}/top_level.txt
File without changes