lucid-dl 2.7.6__py3-none-any.whl → 2.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucid/_tensor/tensor.py CHANGED
@@ -397,28 +397,33 @@ class Tensor(_TensorBase):
         return hash(id(self))
 
     def __deepcopy__(self, *args: Any) -> Self:
+        cls = self.__class__
         copied_data = Tensor.copy_data(self.data)
 
-        new_tensor = Tensor(
-            copied_data,
-            requires_grad=self.requires_grad,
-            keep_grad=self.keep_grad,
-            dtype=self.dtype,
-            device=self.device,
-        )
+        if cls is Tensor:
+            new = Tensor(
+                copied_data, self.requires_grad, self.keep_grad, self.dtype, self.device
+            )
+        else:
+            base = Tensor(copied_data, dtype=self.dtype, device=self.device)
+            new = cls(base)
 
-        if self.grad is not None:
-            new_tensor.grad = Tensor.copy_grad(self.grad)
+        if self.grad is not None and (
+            self.keep_grad or getattr(new, "keep_grad", False)
+        ):
+            new.grad = Tensor.copy_grad(self.grad)
+        else:
+            new.grad = None
 
-        new_tensor._op = self._op
-        new_tensor._backward_op = self._backward_op
-        new_tensor._prev = self._prev.copy()
-        new_tensor._backward_hooks = self._backward_hooks.copy()
+        new._op = None
+        new._backward_op = _noop
+        new._prev = []
+        new._backward_hooks = []
 
-        new_tensor._is_free = self._is_free
-        new_tensor._is_bool_tensor = self._is_bool_tensor
+        new._is_free = self._is_free
+        new._is_bool_tensor = self._is_bool_tensor
 
-        return new_tensor
+        return new
 
     def __bool__(self) -> bool:
         if self.data.size != 1:
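A note on the semantics change above: as of 2.7.8, `__deepcopy__` deliberately severs the copy from the autograd graph (`_op`, `_backward_op`, `_prev`, and `_backward_hooks` are reset instead of copied), rebuilds subclass instances by wrapping a fresh base `Tensor` in `cls(base)`, and carries `grad` over only when `keep_grad` is set on either side. A minimal sketch of the observable difference, relying only on the constructor signature and attributes visible in the diff:

    import copy

    from lucid._tensor import Tensor

    x = Tensor([1.0, 2.0, 3.0], requires_grad=True)
    y = copy.deepcopy(x)

    # Data and flags survive the copy.
    assert y.requires_grad and y.dtype == x.dtype

    # The copy is detached: no op, no parents, no hooks (pre-2.7.8 these
    # were copied from the source, so the copy stayed linked to x's graph).
    assert y._op is None and y._prev == [] and y._backward_hooks == []

    # grad is dropped unless keep_grad was requested on either tensor.
    assert y.grad is None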
lucid/nn/__init__.py CHANGED
@@ -4,3 +4,4 @@ from lucid.nn.modules import *
 from lucid.nn.fused import *
 
 import lucid.nn.init as init
+import lucid.nn.util as util
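Since `lucid/nn/__init__.py` now aliases the module, the new helpers are reachable directly off the `nn` namespace; a quick check, assuming nothing beyond the import added above:

    import lucid.nn as nn
    from lucid.nn import util

    # Both spellings resolve to the same module object.
    assert nn.util is util
    print(util.__all__)  # ['grad_norm', 'clip_grad_norm', 'clip_grad_value']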
lucid/nn/util.py ADDED
@@ -0,0 +1,60 @@
+from typing import Iterable
+
+import lucid
+
+from lucid._tensor import Tensor
+from lucid.types import _Scalar
+
+
+__all__ = ["grad_norm", "clip_grad_norm", "clip_grad_value"]
+
+
+def _as_iter(parameters: Iterable[Tensor] | Tensor) -> list[Tensor]:
+    if isinstance(parameters, Tensor):
+        return [parameters]
+    return list(parameters)
+
+
+def grad_norm(parameters: Iterable[Tensor] | Tensor, norm_type: int = 2) -> Tensor:
+    parameters = _as_iter(parameters)
+    device = parameters[0].device
+
+    params: list[Tensor] = [p for p in parameters if p.grad is not None]
+    if not params:
+        return Tensor(0.0, device=device)
+
+    norm_pow_sum = 0.0
+    for p in params:
+        param_norm = lucid.linalg.norm(lucid.ravel(p.grad), ord=norm_type).item()
+        norm_pow_sum += param_norm**norm_type
+
+    total_norm = norm_pow_sum ** (1.0 / norm_type)
+    return Tensor(total_norm, device=device)
+
+
+def clip_grad_norm(
+    parameters: Iterable[Tensor] | Tensor,
+    max_norm: _Scalar,
+    norm_type: int = 2,
+    eps: float = 1e-7,
+) -> float:
+    params: list[Tensor] = [p for p in _as_iter(parameters) if p.grad is not None]
+    total_norm = grad_norm(params, norm_type=norm_type)
+
+    clip_coef = float(max_norm) / (total_norm.item() + eps)
+    if clip_coef < 1.0:
+        for p in params:
+            p.grad = p.grad * clip_coef
+
+    return total_norm
+
+
+def clip_grad_value(parameters: Iterable[Tensor] | Tensor, clip_value: _Scalar) -> None:
+    params = [p for p in _as_iter(parameters) if p.grad is not None]
+    if not params:
+        return
+
+    lo, hi = -float(clip_value), float(clip_value)
+    for p in params:
+        g_clip = lucid.clip(p.grad, lo, hi).data
+        p.grad = g_clip
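The new module computes the global p-norm as total = (Σ_i ‖g_i‖_p^p)^(1/p) over all parameter gradients, and `clip_grad_norm` rebinds every gradient to a copy scaled by max_norm / (total + eps) whenever that coefficient falls below 1. Note the annotation mismatch: `clip_grad_norm` is typed `-> float` but actually returns the `Tensor` produced by `grad_norm`. A usage sketch with values chosen for round norms; the operator surface assumed here (`*`, `.sum()`, `.backward()`) follows lucid's usual Tensor API and is not part of this diff:

    import lucid.nn as nn
    from lucid._tensor import Tensor

    # d(loss)/dw = w and d(loss)/dv = v, so the gradient norms are 3 and 4.
    w = Tensor([3.0, 0.0], requires_grad=True)
    v = Tensor([0.0, 4.0], requires_grad=True)
    loss = ((w * w).sum() + (v * v).sum()) / 2
    loss.backward()

    # Global L2 norm: sqrt(3**2 + 4**2) = 5.
    print(nn.util.grad_norm([w, v]).item())  # ~5.0

    # clip_coef = 1.0 / (5 + eps) < 1, so both grads are replaced by
    # scaled copies and the global norm drops to ~1.0; the pre-clip
    # norm is returned (as a Tensor, despite the float annotation).
    nn.util.clip_grad_norm([w, v], max_norm=1.0)

    # Clamp each gradient element into [-0.25, 0.25].
    nn.util.clip_grad_value([w, v], clip_value=0.25)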
lucid_dl-2.7.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lucid-dl
-Version: 2.7.6
+Version: 2.7.8
 Summary: Lumerico's Comprehensive Interface for Deep Learning
 Home-page: https://github.com/ChanLumerico/lucid
 Author: ChanLumerico
@@ -46,6 +46,12 @@ Whether you're a student, educator, or an advanced researcher seeking to demysti
 
 - Now supports [**`Safetensors`**](https://github.com/huggingface/safetensors) for Lucid neural module porting along with the legacy `.lcd` format
 
+- Added multiple `nn` utilities:
+
+  - `nn.util.grad_norm` - Returns the global norm of the gradients
+  - `nn.util.clip_grad_norm` - Rescales the gradients based on the global norm
+  - `nn.util.clip_grad_value` - Clips the gradients element-wise into `[-clip_value, clip_value]`
+
 - Implemented **EfficientDet**: `lucid.models.EfficientDet` with variants from `D0` to `D7`
 
 ## 🔧 How to Install
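The `clip_grad_norm` behavior listed in the changelog above reduces to simple arithmetic worth spelling out once; a self-contained numeric check (stdlib only, values chosen for round numbers):

    import math

    # Two gradients with L2 norms 6 and 8: global norm = sqrt(36 + 64) = 10.
    total_norm = math.hypot(6.0, 8.0)

    max_norm, eps = 1.0, 1e-7
    clip_coef = max_norm / (total_norm + eps)  # ~0.1

    # clip_coef < 1, so every gradient element is multiplied by ~0.1,
    # bringing the global norm down to max_norm. Had total_norm been
    # below max_norm, clip_coef >= 1 and the gradients would be untouched.
    assert clip_coef < 1.0
    assert abs(total_norm * clip_coef - max_norm) < 1e-6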
lucid_dl-2.7.8.dist-info/RECORD CHANGED
@@ -10,7 +10,7 @@ lucid/_func/bfunc.py,sha256=OEgtBWFL2S46UTtNU93ekYCe37Pj4oS8MEnJt-rcids,20571
 lucid/_func/gfunc.py,sha256=_gEJnYnAYhcN6aV1Z6ArIOGTgd0NEB2_eM86tE6uq-Q,4866
 lucid/_func/ufunc.py,sha256=OucEx6QdXzO2_y8Xie8Deh5IC7wnt2EIQcgCAJraWqs,30425
 lucid/_tensor/__init__.py,sha256=wFWAMhTnQwThNiBEIT4fcw4ryIm8A4AoR-m9KDhklOQ,40
-lucid/_tensor/tensor.py,sha256=GnoJzsaDg_nL9pBHTiaID-JwlUaLvNgRcC9O0QGTqLQ,13717
+lucid/_tensor/tensor.py,sha256=HSpV9XUpMI79gw1vlmuoGh7Bkl3qFR_4alOjoq4Lq70,13819
 lucid/_tensor/tensor_ops.py,sha256=5XtqcPbrkOb87pVTSOMNaje9K78tgLpsNdV8XXnbpHc,3991
 lucid/_util/__init__.py,sha256=NgOleItHJGVLdJlKHKfpzuSl3vofzJpNsZByHAYJmKs,6838
 lucid/_util/func.py,sha256=jPpjDw-pdK9bw2U3ACEtgCbV47j6Dp9ycbkvkHKefJo,44383
@@ -71,10 +71,11 @@ lucid/models/objdet/yolo/yolo_v3.py,sha256=B5U42Npwfg8nSgU9E261zf0cbQS9RVYrX1ADD
 lucid/models/objdet/yolo/yolo_v4.py,sha256=RFbBumreXmy6s8IYZvUuhW0893ss8sx_8Vgi6KbBKWo,21467
 lucid/models/seq2seq/__init__.py,sha256=wjsrhj4H_AcqwwbebAN8b68QBA8L6p1_12dkG2995-w,27
 lucid/models/seq2seq/transformer.py,sha256=y5rerCs1s6jXTsVvbgscWScKpQKuSu1fezsBe7PNTRA,3513
-lucid/nn/__init__.py,sha256=Kc6_wlpWo0_AtywX8aEWtzjKb0ju2c2cKGNsEY9ho4E,153
+lucid/nn/__init__.py,sha256=_hk6KltQIJuWXowXstMSu3TjiaTP8zMLNvGpjnA9Mpw,182
 lucid/nn/fused.py,sha256=ZGOQmDThaGNQLC59y3M7s993K_K09ce6IZP8cFX8FUE,5498
 lucid/nn/module.py,sha256=XvFWJ8NqXeZpr3RmKBQBz5eqT535Oi_7DaPN1Zi9gJc,21971
 lucid/nn/parameter.py,sha256=jDaWukWecCcH9ri65SefNls66MmyTyucFolWbzSjapc,856
+lucid/nn/util.py,sha256=Yw1iBSPrGV_r_F51qpqLYdafNE_hyaA0DPWYP-rjaig,1699
 lucid/nn/functional/__init__.py,sha256=90Zi7jClPOiiSYx-Qkg0QTideKD6GigbWON9eFCoxzg,13869
 lucid/nn/functional/_activation.py,sha256=nQVwArvPuwkUpLMLCNABTw96Zgw9VsPB8SyXCL6t2LM,1331
 lucid/nn/functional/_attention.py,sha256=nrZF3-2AR03kNo1PGNszujhWlAVcab_FNQwOCWZT47I,946
@@ -119,8 +120,8 @@ lucid/visual/__init__.py,sha256=6TuFDfmXTwpLyHl7_KqBfdzW6zqHjGzIFvymjFPlvjI,21
 lucid/visual/graph.py,sha256=YjpIDM_lloZARw3sCBiXPl_hT5A2gTk2fEHvwvJWXTk,4599
 lucid/weights/__init__.py,sha256=z1AikA3rOEeckWGkYWlcZkxNlJo9Xwa39PL6ly3hWnc,8801
 lucid/weights/__init__.pyi,sha256=lFonYC3cUx2Idolf3AEPnjFcyqcn3UDU84oJlZafqLY,3013
-lucid_dl-2.7.6.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
-lucid_dl-2.7.6.dist-info/METADATA,sha256=GgdsxwpPv_EGIOIARff6VweVKfgDi_A8vs9aHvFQcCI,11260
-lucid_dl-2.7.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lucid_dl-2.7.6.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
-lucid_dl-2.7.6.dist-info/RECORD,,
+lucid_dl-2.7.8.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
+lucid_dl-2.7.8.dist-info/METADATA,sha256=t8DZ8085SXcg8SH2ibdQH5K9zq2toJ3B4qwm-SpLkAk,11519
+lucid_dl-2.7.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lucid_dl-2.7.8.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
+lucid_dl-2.7.8.dist-info/RECORD,,