lucid-dl 2.7.5__py3-none-any.whl → 2.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucid/nn/__init__.py CHANGED
@@ -4,3 +4,4 @@ from lucid.nn.modules import *
 from lucid.nn.fused import *
 
 import lucid.nn.init as init
+import lucid.nn.util as util
lucid/nn/module.py CHANGED
@@ -118,7 +118,7 @@ class Module:
         for param in self.parameters():
             param.zero()
 
-    def forward(self, *args: Any, **kwargs: Any) -> Tensor | tuple[Tensor, ...]:
+    def forward(self) -> Tensor | tuple[Tensor, ...]:
         raise NotImplementedError(
             "The forward method must be implemented by the subclass."
         )
@@ -204,7 +204,7 @@ class Module:
                 destination=destination, prefix=prefix + name + ".", keep_vars=keep_vars
             )
 
-        for key in destination.keys():
+        for key in list(destination.keys()):
            if key in self._state_dict_pass_attr:
                del destination[key]
 
@@ -229,10 +229,8 @@ class Module:
             if key in own_state:
                 attr = own_state[key]
                 if isinstance(attr, (nn.Parameter, nn.Buffer)):
-                    if isinstance(value, Tensor):
-                        attr.data = value.data
-                    else:
-                        attr.data = value
+                    value_t = Tensor(value, device=self.device)
+                    attr.data = value_t.data
                 else:
                     setattr(self, key, value)
             elif strict:
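
The loading path now wraps every incoming value as Tensor(value, device=self.device) before assigning it to a Parameter or Buffer, so plain lists or arrays land on whatever device the module lives on. A minimal sketch of what that allows, assuming this hunk belongs to Module.load_state_dict and that nn.Parameter wraps a Tensor; the Scale class and its key name are illustrative, not part of the package:

from lucid._tensor import Tensor
import lucid.nn as nn


class Scale(nn.Module):
    """Illustrative one-parameter module (not shipped with lucid)."""

    def __init__(self) -> None:
        super().__init__()
        self.weight = nn.Parameter(Tensor([1.0, 1.0, 1.0]))

    def forward(self, x):            # subclasses still accept inputs; only the
        return x * self.weight       # base-class stub dropped *args/**kwargs


m = Scale()
# Values no longer have to be Tensors: they are wrapped onto m's device.
m.load_state_dict({"weight": [2.0, 2.0, 2.0]})
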
lucid/nn/util.py ADDED
@@ -0,0 +1,60 @@
+from typing import Iterable
+
+import lucid
+
+from lucid._tensor import Tensor
+from lucid.types import _Scalar
+
+
+__all__ = ["grad_norm", "clip_grad_norm", "clip_grad_value"]
+
+
+def _as_iter(parameters: Iterable[Tensor] | Tensor) -> list[Tensor]:
+    if isinstance(parameters, Tensor):
+        return [parameters]
+    return list(parameters)
+
+
+def grad_norm(parameters: Iterable[Tensor] | Tensor, norm_type: int = 2) -> Tensor:
+    parameters = _as_iter(parameters)
+    device = parameters[0].device
+
+    params: list[Tensor] = [p for p in parameters if p.grad is not None]
+    if not params:
+        return Tensor(0.0, device=device)
+
+    norm_pow_sum = 0.0
+    for p in params:
+        param_norm = lucid.linalg.norm(lucid.ravel(p.grad), ord=norm_type).item()
+        norm_pow_sum += param_norm**norm_type
+
+    total_norm = norm_pow_sum ** (1.0 / norm_type)
+    return Tensor(total_norm, device=device)
+
+
+def clip_grad_norm(
+    parameters: Iterable[Tensor] | Tensor,
+    max_norm: _Scalar,
+    norm_type: int = 2,
+    eps: float = 1e-7,
+) -> float:
+    params: list[Tensor] = [p for p in _as_iter(parameters) if p.grad is not None]
+    total_norm = grad_norm(params, norm_type=norm_type)
+
+    clip_coef = float(max_norm) / (total_norm.item() + eps)
+    if clip_coef < 1.0:
+        for p in params:
+            p.grad = p.grad * clip_coef
+
+    return total_norm
+
+
+def clip_grad_value(parameters: Iterable[Tensor] | Tensor, clip_value: _Scalar) -> None:
+    params = [p for p in _as_iter(parameters) if p.grad is not None]
+    if not params:
+        return
+
+    lo, hi = -float(clip_value), float(clip_value)
+    for p in params:
+        g_clip = lucid.clip(p.grad, lo, hi).data
+        p.grad = g_clip
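
The three helpers mirror the usual gradient-clipping workflow: grad_norm reports the global norm, clip_grad_norm rescales gradients so the global norm does not exceed max_norm, and clip_grad_value clamps each gradient element-wise. A usage sketch under the assumption of a typical training step; model, optimizer, and x are placeholders, and Tensor.backward() is assumed as the autodiff entry point:

import lucid.nn as nn

loss = model(x).sum()      # placeholder forward pass
loss.backward()            # populates .grad on the parameters

# Global-norm clipping: returns the pre-clip global gradient norm.
total = nn.util.clip_grad_norm(model.parameters(), max_norm=1.0)

# Element-wise clamping to [-0.5, 0.5].
nn.util.clip_grad_value(model.parameters(), clip_value=0.5)

optimizer.step()
optimizer.zero_grad()
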
lucid/optim/_base.py CHANGED
@@ -1,5 +1,5 @@
 from collections import defaultdict
-from typing import Any, Iterable, OrderedDict
+from typing import Any, Iterable
 from abc import ABC, abstractmethod
 import copy
 
@@ -12,19 +12,19 @@ class Optimizer(ABC):
     def __init__(
         self, params: Iterable[nn.Parameter], defaults: dict[str, Any]
     ) -> None:
-        super().__init__()
         if not isinstance(params, Iterable):
             raise TypeError("params should be an iterable of Parameters.")
 
-        params = list(params)
-        self.param_groups = self.param_groups_setup(params, defaults)
-        self.defaults = defaults
-        self.state: dict[nn.Parameter, dict[str, Any]] = defaultdict(dict)
+        param_list = list(params)
+        for p in param_list:
+            if not isinstance(p, nn.Parameter):
+                raise TypeError(f"Expected nn.Parameter, got {type(p).__name__}.")
 
-    def param_groups_setup(
-        self, params: list[nn.Parameter], defaults: dict[str, Any]
-    ) -> list[dict[str, Any]]:
-        return [{"params": list(params), **defaults}]
+        self.defaults: dict[str, Any] = dict(defaults)
+        self.param_groups: list[dict[str, Any]] = self.param_groups_setup(
+            param_list, self.defaults
+        )
+        self.state: dict[nn.Parameter, dict[str, Any]] = defaultdict(dict)
 
     @abstractmethod
     def step(self, closure: _OptimClosure | None = None) -> Any | None:
@@ -33,25 +33,110 @@ class Optimizer(ABC):
     def zero_grad(self) -> None:
         for group in self.param_groups:
             for param in group["params"]:
-                param.zero_grad()
+                if isinstance(param, nn.Parameter):
+                    param.zero_grad()
+
+    def param_groups_setup(
+        self, params: list[nn.Parameter], defaults: dict[str, Any]
+    ) -> list[dict[str, Any]]:
+        return [{"params": list(params), **defaults}]
 
     def add_param_group(self, param_group: dict[str, Any]) -> None:
-        for group in self.param_groups:
-            if set(group["params"]) & set(param_group["params"]):
-                raise ValueError(
-                    "Some parameters appear in more than one parameter group."
+        if "params" not in param_group:
+            raise ValueError("param_group must have a 'params' key.")
+
+        params = list(param_group["params"])
+        if len(params) == 0:
+            raise ValueError("param_group['params'] must be non-empty.")
+
+        for p in params:
+            if not isinstance(p, nn.Parameter):
+                raise TypeError(
+                    f"Expected nn.Parameter in param_group, got {type(p).__name__}."
                 )
-        self.param_groups.append(param_group)
 
-    def state_dict(self) -> OrderedDict:
-        return {
-            "state": copy.deepcopy(self.state),
-            "param_groups": copy.deepcopy(self.param_groups),
+        existing = set()
+        for g in self.param_groups:
+            existing.update(g["params"])
+
+        if any(p in existing for p in params):
+            raise ValueError("Some parameters appear in more than one parameter group.")
+
+        filled = {
+            **self.defaults,
+            **{k: v for k, v in param_group.items() if k != "params"},
         }
+        filled["params"] = params
+        self.param_groups.append(filled)
+
+    def _flat_params(self) -> list[nn.Parameter]:
+        flat: list[nn.Parameter] = []
+        for g in self.param_groups:
+            flat.extend(g["params"])
+
+        return flat
+
+    def state_dict(self) -> dict:
+        param_to_idx: dict[nn.Parameter, int] = {}
+        for idx, p in enumerate(self._flat_params()):
+            if p not in param_to_idx:
+                param_to_idx[p] = idx
+
+        packed_state: dict[int, dict[str, Any]] = {}
+        for p, st in self.state.items():
+            if p in param_to_idx:
+                packed_state[param_to_idx[p]] = copy.deepcopy(st)
+
+        packed_groups: list[dict[str, Any]] = []
+        for g in self.param_groups:
+            new_g: dict[str, Any] = {}
+
+            for k, v in g.items():
+                if k == "params":
+                    new_g[k] = [param_to_idx[p] for p in v]
+                else:
+                    new_g[k] = copy.deepcopy(v)
+
+            packed_groups.append(new_g)
+
+        return {"state": packed_state, "param_groups": packed_groups}
+
+    def load_state_dict(self, state_dict: dict) -> None:
+        if (
+            not isinstance(state_dict, dict)
+            or "state" not in state_dict
+            or "param_groups" not in state_dict
+        ):
+            raise TypeError("Invalid state_dict format for Optimizer.")
+
+        saved_groups = state_dict["param_groups"]
+        saved_state = state_dict["state"]
+
+        current_params = self._flat_params()
+        n_current = len(current_params)
+
+        new_groups: list[dict[str, Any]] = []
+        for sg in saved_groups:
+            if "params" not in sg:
+                raise KeyError("Saved param_group missing 'params'.")
+            indices: list[int] = list(sg["params"])
+
+            if any(i < 0 or i >= n_current for i in indices):
+                raise IndexError("Saved state refers to parameter index out of range.")
+
+            params = [current_params[i] for i in indices]
+            ng = {
+                k: (params if k == "params" else copy.deepcopy(v))
+                for k, v in sg.items()
+            }
+            new_groups.append(ng)
+
+        self.param_groups = new_groups
 
-    def load_state_dict(self, state_dict: OrderedDict) -> None:
-        self.state = defaultdict(dict, copy.deepcopy(state_dict["state"]))
-        self.param_groups = copy.deepcopy(state_dict["param_groups"])
+        self.state = defaultdict(dict)
+        for i, p in enumerate(self._flat_params()):
+            if i in saved_state:
+                self.state[p] = copy.deepcopy(saved_state[i])
 
     def __repr__(self) -> str:
-        return f"{type(self).__name__}({self.param_groups})"
+        return f"{type(self).__name__}({self.defaults})"
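
Optimizer.state_dict now packs per-parameter state under integer indices (position in the flattened param groups) instead of keying on Parameter objects, so a saved dict can be loaded into a freshly constructed optimizer over the same parameters. A round-trip sketch, assuming a trivial subclass of the abstract base; NullOpt is illustrative and model is a placeholder Module:

from lucid.optim._base import Optimizer


class NullOpt(Optimizer):
    """Illustrative no-op optimizer, only to exercise the base-class state handling."""

    def __init__(self, params, lr: float = 0.1) -> None:
        super().__init__(params, defaults={"lr": lr})

    def step(self, closure=None):
        return None                      # a real optimizer would update param.data


opt = NullOpt(model.parameters())        # `model` is a placeholder nn.Module
ckpt = opt.state_dict()                  # {"state": {idx: ...}, "param_groups": [...]}

fresh = NullOpt(model.parameters())      # rebuilt over the same parameters
fresh.load_state_dict(ckpt)              # indices map back onto the current params
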
lucid/optim/lr_scheduler/_base.py CHANGED
@@ -8,56 +8,78 @@ class LRScheduler(ABC):
     def __init__(
         self, optimizer: Optimizer, last_epoch: int = -1, verbose: bool = False
     ) -> None:
-        super().__init__()
         if not hasattr(optimizer, "param_groups"):
             raise TypeError(f"{type(optimizer).__name__} is not a valid optimizer.")
 
         self.optimizer = optimizer
         self.last_epoch = last_epoch
         self.verbose = verbose
-        self.base_lrs = [group["lr"] for group in optimizer.param_groups]
+        self.base_lrs: list[float] = [float(g["lr"]) for g in optimizer.param_groups]
 
         self._step_count = 0
-        self._last_lr = [group["lr"] for group in optimizer.param_groups]
+        self._last_lr: list[float] = [float(g["lr"]) for g in optimizer.param_groups]
 
     @abstractmethod
     def get_lr(self) -> list[float]:
-        raise NotImplementedError("get_lr must be implemented in subclasses.")
+        raise NotImplementedError
 
     def step(self, epoch: int | None = None) -> None:
-        if epoch is not None:
-            self.last_epoch = epoch
+        if epoch is None:
+            self.last_epoch += 1
         else:
-            self._step_count += 1
-            self.last_epoch = self._step_count
+            self.last_epoch = int(epoch)
+        self._step_count += 1
 
         new_lrs = self.get_lr()
-        for param_group, lr in zip(self.optimizer.param_groups, new_lrs):
-            param_group["lr"] = lr
+        if len(new_lrs) != len(self.optimizer.param_groups):
+            raise ValueError(
+                f"get_lr returned {len(new_lrs)} values, "
+                f"but optimizer has {len(self.optimizer.param_groups)} param groups."
+            )
 
-        self._last_lr = new_lrs
+        for group, lr in zip(self.optimizer.param_groups, new_lrs):
+            group["lr"] = float(lr)
+
+        self._last_lr = [float(g["lr"]) for g in self.optimizer.param_groups]
 
         if self.verbose:
-            print(f"Epoch {self.last_epoch}: setting learning rates to {new_lrs}.")
+            print(
+                f"Epoch {self.last_epoch}: setting learning rates to {self._last_lr}."
+            )
 
     def state_dict(self) -> dict[str, Any]:
         return {
-            "last_epoch": self.last_epoch,
-            "base_lrs": self.base_lrs,
-            "_step_count": self._step_count,
-            "_last_lr": self._last_lr,
+            "last_epoch": int(self.last_epoch),
+            "base_lrs": [float(x) for x in self.base_lrs],
+            "_step_count": int(self._step_count),
+            "_last_lr": [float(x) for x in self._last_lr],
+            "_group_count": len(self.optimizer.param_groups),
         }
 
     def load_state_dict(self, state_dict: dict[str, Any]) -> None:
-        self.last_epoch = state_dict["last_epoch"]
-        self.base_lrs = state_dict["base_lrs"]
+        required = {"last_epoch", "base_lrs", "_step_count", "_last_lr"}
+        missing = required - set(state_dict)
+        if missing:
+            raise KeyError(f"Missing keys in scheduler state_dict: {missing}")
+
+        saved_group_count = int(
+            state_dict.get("_group_count", len(state_dict["_last_lr"]))
+        )
+        current_group_count = len(self.optimizer.param_groups)
+        if saved_group_count != current_group_count:
+            raise ValueError(
+                "Cannot load scheduler state: param group count mismatch "
+                f"(saved={saved_group_count}, current={current_group_count})."
+            )
 
-        self._step_count = state_dict["_step_count"]
-        self._last_lr = state_dict["_last_lr"]
+        self.last_epoch = int(state_dict["last_epoch"])
+        self.base_lrs = [float(x) for x in state_dict["base_lrs"]]
+        self._step_count = int(state_dict["_step_count"])
+        self._last_lr = [float(x) for x in state_dict["_last_lr"]]
 
-        for param_group, lr in zip(self.optimizer.param_groups, self._last_lr):
-            param_group["lr"] = lr
+        for group, lr in zip(self.optimizer.param_groups, self._last_lr):
+            group["lr"] = float(lr)
 
     @property
     def last_lr(self) -> list[float]:
-        return self._last_lr
+        return list(self._last_lr)
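
With the revised bookkeeping, step() advances last_epoch (or adopts an explicit epoch), asks get_lr() for exactly one value per param group, and writes the results back into the optimizer, so a subclass only needs to implement get_lr(). A minimal sketch reusing the NullOpt placeholder from the optimizer example above; ConstantDecay is an illustrative name, not a scheduler shipped in lucid.optim.lr_scheduler:

from lucid.optim.lr_scheduler._base import LRScheduler


class ConstantDecay(LRScheduler):
    """Illustrative scheduler: shrink every base LR by `gamma` per epoch."""

    def __init__(self, optimizer, gamma: float = 0.5, last_epoch: int = -1) -> None:
        self.gamma = gamma
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> list[float]:
        # One entry per param group, driven by the last_epoch counter step() maintains.
        return [base * (self.gamma**self.last_epoch) for base in self.base_lrs]


sched = ConstantDecay(opt, gamma=0.5)
sched.step()                 # last_epoch -> 0, LR stays at its base value
sched.step()                 # last_epoch -> 1, LR becomes base * 0.5
print(sched.last_lr)         # a copy, thanks to the new list(...) in the property
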
lucid/port.py CHANGED
@@ -5,14 +5,13 @@ from typing import Literal
 
 from lucid._tensor import Tensor
 from lucid.nn import Module
-from lucid.types import _NumPyArray
 
 
 __all__ = ["save", "load"]
 
-_LucidPortable = Tensor | Module | OrderedDict
+_LucidPortable = Tensor | Module | OrderedDict | dict
 
-FORMAT_VERSION: float = 1.0
+FORMAT_VERSION: float = 1.1
 
 EXTENSIONS = Literal[".lct", ".lcd", ".safetensors"]
 
@@ -30,7 +29,7 @@ def save(obj: _LucidPortable, path: Path | str, safetensors: bool = False) -> Pa
     if path.suffix == "":
         if isinstance(obj, Tensor):
             path = path.with_suffix(".lct")
-        elif isinstance(obj, (Module, OrderedDict)):
+        elif isinstance(obj, (Module, OrderedDict, dict)):
            path = (
                path.with_suffix(".safetensors")
                if safetensors
@@ -56,10 +55,14 @@ def save(obj: _LucidPortable, path: Path | str, safetensors: bool = False) -> Pa
     elif suffix == ".lcd":
         if isinstance(obj, Module):
             obj = obj.state_dict()
-        if not isinstance(obj, OrderedDict):
-            raise TypeError("Expected a state_dict (OrderedDict) for .lcd file.")
+        if not isinstance(obj, (OrderedDict, dict)):
+            raise TypeError("Expected a state_dict for .lcd file.")
 
-        data = {"type": "OrderedDict", "format_version": FORMAT_VERSION, "content": obj}
+        data = {
+            "type": type(obj).__name__,
+            "format_version": FORMAT_VERSION,
+            "content": obj,
+        }
 
     elif suffix == ".safetensors":
         try:
@@ -72,10 +75,8 @@ def save(obj: _LucidPortable, path: Path | str, safetensors: bool = False) -> Pa
 
         if isinstance(obj, Module):
             obj = obj.state_dict()
-        if not isinstance(obj, OrderedDict):
-            raise TypeError(
-                "Expected a state_dict (OrderedDict) for .safetensors file."
-            )
+        if not isinstance(obj, (OrderedDict, dict)):
+            raise TypeError("Expected a state_dict for .safetensors file.")
 
         save_file(obj, str(path))
         return path.resolve()
@@ -122,7 +123,7 @@ def load(path: Path | str) -> _LucidPortable:
         array = data["content"]
         return Tensor(array)
 
-    elif file_type == "OrderedDict":
+    elif file_type in {"OrderedDict", "dict"}:
        return data["content"]
 
    else:
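
Since FORMAT_VERSION 1.1, save() and load() accept plain dicts as well as OrderedDicts, which covers the index-keyed dict returned by the new Optimizer.state_dict. A sketch of the intended round trip, reusing the model and opt placeholders from the earlier sketches and illustrative file names:

from lucid.port import save, load

save(model.state_dict(), "model.lcd")     # a plain dict is now accepted
save(opt.state_dict(), "optim.lcd")       # optimizer state keyed by parameter index

model.load_state_dict(load("model.lcd"))  # load() hands the stored dict back
opt.load_state_dict(load("optim.lcd"))
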
lucid_dl-2.7.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lucid-dl
-Version: 2.7.5
+Version: 2.7.7
 Summary: Lumerico's Comprehensive Interface for Deep Learning
 Home-page: https://github.com/ChanLumerico/lucid
 Author: ChanLumerico
lucid_dl-2.7.7.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 lucid/__init__.py,sha256=NzXUnsIgDEq0Yx146zxhqID44X_0yNirkxt68NuL6Kg,7900
 lucid/error.py,sha256=qnTiVuZm3c5-DIt-OOyobZ7RUm7E1K4NR0j998LG1ug,709
-lucid/port.py,sha256=Ld5h6F3U6qNA-PSpxuPhbYeyqhZhMVCfTBXaT0xLcW0,3814
+lucid/port.py,sha256=Kt1YaSWef_eKF4KRj-UFhirvFC5urEESfYQ_BSlBZGE,3811
 lucid/types.py,sha256=3yj99eZNv8N3bupP4htHU_SEvrqNyRuq2k7WGLVD-X0,3605
 lucid/_backend/__init__.py,sha256=n1bnYdeb_bNDBKASWGywTRa0Ne9hMAkal3AuVZJgovI,5
 lucid/_backend/core.py,sha256=xMyhNoEswIxCr69wTNo3QXtNVUfX7xsmDP4t0JRlRzQ,5798
@@ -71,10 +71,11 @@ lucid/models/objdet/yolo/yolo_v3.py,sha256=B5U42Npwfg8nSgU9E261zf0cbQS9RVYrX1ADD
 lucid/models/objdet/yolo/yolo_v4.py,sha256=RFbBumreXmy6s8IYZvUuhW0893ss8sx_8Vgi6KbBKWo,21467
 lucid/models/seq2seq/__init__.py,sha256=wjsrhj4H_AcqwwbebAN8b68QBA8L6p1_12dkG2995-w,27
 lucid/models/seq2seq/transformer.py,sha256=y5rerCs1s6jXTsVvbgscWScKpQKuSu1fezsBe7PNTRA,3513
-lucid/nn/__init__.py,sha256=Kc6_wlpWo0_AtywX8aEWtzjKb0ju2c2cKGNsEY9ho4E,153
+lucid/nn/__init__.py,sha256=_hk6KltQIJuWXowXstMSu3TjiaTP8zMLNvGpjnA9Mpw,182
 lucid/nn/fused.py,sha256=ZGOQmDThaGNQLC59y3M7s993K_K09ce6IZP8cFX8FUE,5498
-lucid/nn/module.py,sha256=PeMVEdGYg5bVgRVQUdoTTT5zrZ-irt2vyuWQN6xCaBI,22048
+lucid/nn/module.py,sha256=XvFWJ8NqXeZpr3RmKBQBz5eqT535Oi_7DaPN1Zi9gJc,21971
 lucid/nn/parameter.py,sha256=jDaWukWecCcH9ri65SefNls66MmyTyucFolWbzSjapc,856
+lucid/nn/util.py,sha256=Yw1iBSPrGV_r_F51qpqLYdafNE_hyaA0DPWYP-rjaig,1699
 lucid/nn/functional/__init__.py,sha256=90Zi7jClPOiiSYx-Qkg0QTideKD6GigbWON9eFCoxzg,13869
 lucid/nn/functional/_activation.py,sha256=nQVwArvPuwkUpLMLCNABTw96Zgw9VsPB8SyXCL6t2LM,1331
 lucid/nn/functional/_attention.py,sha256=nrZF3-2AR03kNo1PGNszujhWlAVcab_FNQwOCWZT47I,946
@@ -102,13 +103,13 @@ lucid/nn/modules/sparse.py,sha256=EpjiviED2nI55wUjh1twFwa4Lvlrzw0TR6lpCDGeSbo,11
 lucid/nn/modules/transformer.py,sha256=z56emF_eX18pxRELjfmmsY-7Bn9h2yjIdxCaxs6YDwA,11246
 lucid/nn/modules/vision.py,sha256=8xYasT7TNj4NXwMwwJIw1nbV1paeWEFg_ZohXn9kZBg,1579
 lucid/optim/__init__.py,sha256=21EcCCPwrhPGP9TXvDje075_S2hPr0pHToygCaq8keI,201
-lucid/optim/_base.py,sha256=yHCjOw9AotqDl64nJyvYl6lVwGo4MpgPHwH_YAF9pKw,2022
+lucid/optim/_base.py,sha256=KxM5h5ONeO8hCpAzD2_vverFRKeymu2XC6AHN_L_v3g,4859
 lucid/optim/ada.py,sha256=POIl7dbv3qqwKxGGaceSrs-lZF1tD-vyvDxjtZdx--E,5807
 lucid/optim/adam.py,sha256=pVlZIcXD1s-IYK-WAfFognId8RhxzmlS5227-i0Vhq4,10347
 lucid/optim/prop.py,sha256=CbsWmoBb_g_8z16M3T6dMoSR9c72hm8M375IT1UHjpw,4740
 lucid/optim/sgd.py,sha256=DBZ1ZXQ9TfKZCRECfNRMDH9mvqUWCOPdY5TobnVxpz8,4477
 lucid/optim/lr_scheduler/__init__.py,sha256=kUoyN2g9nwTtEAqEVij832WSRvzEpKZywSJdfD7MQvY,58
-lucid/optim/lr_scheduler/_base.py,sha256=Qvw0OCaZQJaAmgkZbfqqYBb1sSKx0rE2obGHhN4u35E,2019
+lucid/optim/lr_scheduler/_base.py,sha256=NNJnjwmJpsRXathrbLtH4tjfBHtwOiJ5HwF1_S6Ym5c,3092
 lucid/optim/lr_scheduler/_schedulers.py,sha256=OIzduTXV6Ut4qcvw6neMPr3jlv6BgTSsys0-6KoHxK4,8140
 lucid/random/__init__.py,sha256=s8EAaKhEiTKT_vYjP4IFHx0xQVa1jqc_qIyvMauUu7M,2727
 lucid/random/_func.py,sha256=1Lu4m-ciEK037chNDGqv_j00RgGGzQ7UfslSfYActUk,2232
@@ -119,8 +120,8 @@ lucid/visual/__init__.py,sha256=6TuFDfmXTwpLyHl7_KqBfdzW6zqHjGzIFvymjFPlvjI,21
 lucid/visual/graph.py,sha256=YjpIDM_lloZARw3sCBiXPl_hT5A2gTk2fEHvwvJWXTk,4599
 lucid/weights/__init__.py,sha256=z1AikA3rOEeckWGkYWlcZkxNlJo9Xwa39PL6ly3hWnc,8801
 lucid/weights/__init__.pyi,sha256=lFonYC3cUx2Idolf3AEPnjFcyqcn3UDU84oJlZafqLY,3013
-lucid_dl-2.7.5.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
-lucid_dl-2.7.5.dist-info/METADATA,sha256=TFYQJ8ntSJLvF7J3qoAWsR7K95-DtnI-uprRQbQWzvM,11260
-lucid_dl-2.7.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lucid_dl-2.7.5.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
-lucid_dl-2.7.5.dist-info/RECORD,,
+lucid_dl-2.7.7.dist-info/licenses/LICENSE,sha256=vxRFYnVD1IeYtsvw-KmoElfqrjxKHv1h9YTvsG54loQ,1065
+lucid_dl-2.7.7.dist-info/METADATA,sha256=UVi3p93MdqSoYHvUkdil5C12wx5sfZy5Obw4Hkd7pxs,11260
+lucid_dl-2.7.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lucid_dl-2.7.7.dist-info/top_level.txt,sha256=uzP_qBx9iNWIHKJRlElYcBLYVqMpdm9Q1Ma63QPYbFc,6
+lucid_dl-2.7.7.dist-info/RECORD,,