sae-lens 6.16.3__py3-none-any.whl → 6.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sae_lens/__init__.py CHANGED
@@ -1,5 +1,5 @@
  # ruff: noqa: E402
- __version__ = "6.16.3"
+ __version__ = "6.17.0"

  import logging

sae_lens/cache_activations_runner.py CHANGED
@@ -10,7 +10,7 @@ from datasets import Array2D, Dataset, Features, Sequence, Value
  from datasets.fingerprint import generate_fingerprint
  from huggingface_hub import HfApi
  from jaxtyping import Float, Int
- from tqdm import tqdm
+ from tqdm.auto import tqdm
  from transformer_lens.HookedTransformer import HookedRootModule

  from sae_lens import logger
sae_lens/config.py CHANGED
@@ -171,6 +171,7 @@ class LanguageModelSAERunnerConfig(Generic[T_TRAINING_SAE_CONFIG]):
          n_checkpoints (int): The number of checkpoints to save during training. 0 means no checkpoints.
          checkpoint_path (str | None): The path to save checkpoints. A unique ID will be appended to this path. Set to None to disable checkpoint saving. (default is "checkpoints")
          save_final_checkpoint (bool): Whether to include an additional final checkpoint when training is finished. (default is False).
+         resume_from_checkpoint (str | None): The path to the checkpoint to resume training from. (default is None).
          output_path (str | None): The path to save outputs. Set to None to disable output saving. (default is "output")
          verbose (bool): Whether to print verbose output. (default is True)
          model_kwargs (dict[str, Any]): Keyword arguments for `model.run_with_cache`
@@ -261,6 +262,7 @@ class LanguageModelSAERunnerConfig(Generic[T_TRAINING_SAE_CONFIG]):
      checkpoint_path: str | None = "checkpoints"
      save_final_checkpoint: bool = False
      output_path: str | None = "output"
+     resume_from_checkpoint: str | None = None

      # Misc
      verbose: bool = True
sae_lens/constants.py CHANGED
@@ -17,5 +17,6 @@ SAE_WEIGHTS_FILENAME = "sae_weights.safetensors"
  SAE_CFG_FILENAME = "cfg.json"
  RUNNER_CFG_FILENAME = "runner_cfg.json"
  SPARSIFY_WEIGHTS_FILENAME = "sae.safetensors"
+ TRAINER_STATE_FILENAME = "trainer_state.pt"
  ACTIVATIONS_STORE_STATE_FILENAME = "activations_store_state.safetensors"
  ACTIVATION_SCALER_CFG_FILENAME = "activation_scaler.json"
sae_lens/llm_sae_training_runner.py CHANGED
@@ -16,7 +16,6 @@ from typing_extensions import deprecated
  from sae_lens import logger
  from sae_lens.config import HfDataset, LanguageModelSAERunnerConfig
  from sae_lens.constants import (
-     ACTIVATIONS_STORE_STATE_FILENAME,
      RUNNER_CFG_FILENAME,
      SPARSITY_FILENAME,
  )
@@ -112,6 +111,7 @@ class LanguageModelSAETrainingRunner:
          override_dataset: HfDataset | None = None,
          override_model: HookedRootModule | None = None,
          override_sae: TrainingSAE[Any] | None = None,
+         resume_from_checkpoint: Path | str | None = None,
      ):
          if override_dataset is not None:
              logger.warning(
@@ -153,6 +153,7 @@ class LanguageModelSAETrainingRunner:
              )
          else:
              self.sae = override_sae
+
          self.sae.to(self.cfg.device)

      def run(self):
@@ -185,6 +186,12 @@ class LanguageModelSAETrainingRunner:
              cfg=self.cfg.to_sae_trainer_config(),
          )

+         if self.cfg.resume_from_checkpoint is not None:
+             logger.info(f"Resuming from checkpoint: {self.cfg.resume_from_checkpoint}")
+             trainer.load_trainer_state(self.cfg.resume_from_checkpoint)
+             self.sae.load_weights_from_checkpoint(self.cfg.resume_from_checkpoint)
+             self.activations_store.load_from_checkpoint(self.cfg.resume_from_checkpoint)
+
          self._compile_if_needed()
          sae = self.run_trainer_with_interruption_handling(trainer)

@@ -304,9 +311,7 @@ class LanguageModelSAETrainingRunner:
          if checkpoint_path is None:
              return

-         self.activations_store.save(
-             str(checkpoint_path / ACTIVATIONS_STORE_STATE_FILENAME)
-         )
+         self.activations_store.save_to_checkpoint(checkpoint_path)

          runner_config = self.cfg.to_dict()
          with open(checkpoint_path / RUNNER_CFG_FILENAME, "w") as f:
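The hunks above wire checkpoint resumption through the training runner: when `cfg.resume_from_checkpoint` is set, `run()` restores the trainer state, the SAE weights, and the activations-store position before training continues. A minimal sketch of a resumed run; the checkpoint path and the elided config fields are placeholders and must match whatever the interrupted run used:

```python
# Sketch only: all omitted config fields must match the original (interrupted) run,
# and the checkpoint directory below is a hypothetical placeholder.
from sae_lens.config import LanguageModelSAERunnerConfig
from sae_lens.llm_sae_training_runner import LanguageModelSAETrainingRunner

cfg = LanguageModelSAERunnerConfig(
    # ... same model / dataset / SAE settings as the original run ...
    checkpoint_path="checkpoints",
    resume_from_checkpoint="checkpoints/<run_id>/<n_training_samples>",
)

runner = LanguageModelSAETrainingRunner(cfg)
sae = runner.run()  # run() reloads trainer state, SAE weights, and store position first
```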
sae_lens/saes/sae.py CHANGED
@@ -21,7 +21,7 @@ import einops
  import torch
  from jaxtyping import Float
  from numpy.typing import NDArray
- from safetensors.torch import save_file
+ from safetensors.torch import load_file, save_file
  from torch import nn
  from transformer_lens.hook_points import HookedRootModule, HookPoint
  from typing_extensions import deprecated, overload, override
@@ -1018,6 +1018,12 @@ class TrainingSAE(SAE[T_TRAINING_SAE_CONFIG], ABC):
      ) -> type[TrainingSAEConfig]:
          return get_sae_training_class(architecture)[1]

+     def load_weights_from_checkpoint(self, checkpoint_path: Path | str) -> None:
+         checkpoint_path = Path(checkpoint_path)
+         state_dict = load_file(checkpoint_path / SAE_WEIGHTS_FILENAME)
+         self.process_state_dict_for_loading(state_dict)
+         self.load_state_dict(state_dict)
+

  _blank_hook = nn.Identity()

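The new `TrainingSAE.load_weights_from_checkpoint` reads `sae_weights.safetensors` from a checkpoint directory into an existing SAE instance, running `process_state_dict_for_loading` first. A sketch, assuming `sae` is a `TrainingSAE` built with the same config as the checkpoint and the path is a placeholder:

```python
# `sae` is assumed to be an already-constructed TrainingSAE whose architecture and
# dimensions match the checkpoint; the directory name is a hypothetical placeholder.
sae.load_weights_from_checkpoint("checkpoints/<run_id>/<n_training_samples>")
```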
sae_lens/training/activation_scaler.py CHANGED
@@ -1,5 +1,6 @@
  import json
  from dataclasses import dataclass
+ from pathlib import Path
  from statistics import mean

  import torch
@@ -51,3 +52,9 @@ class ActivationScaler:

          with open(file_path, "w") as f:
              json.dump({"scaling_factor": self.scaling_factor}, f)
+
+     def load(self, file_path: str | Path):
+         """load the state dict from a file in json format"""
+         with open(file_path) as f:
+             data = json.load(f)
+         self.scaling_factor = data["scaling_factor"]
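`ActivationScaler` can now round-trip its scaling factor through the same JSON file it already saves. A small sketch, assuming the dataclass can be constructed with no arguments and exposes `scaling_factor` as a plain attribute, as the `save`/`load` bodies above suggest:

```python
from sae_lens.training.activation_scaler import ActivationScaler

scaler = ActivationScaler()
scaler.scaling_factor = 0.5  # placeholder value
scaler.save("activation_scaler.json")  # writes {"scaling_factor": 0.5}

restored = ActivationScaler()
restored.load("activation_scaler.json")  # restored.scaling_factor == 0.5
```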
sae_lens/training/activations_store.py CHANGED
@@ -4,6 +4,7 @@ import json
  import os
  import warnings
  from collections.abc import Generator, Iterator, Sequence
+ from pathlib import Path
  from typing import Any, Literal, cast

  import datasets
@@ -13,8 +14,8 @@ from huggingface_hub import hf_hub_download
  from huggingface_hub.utils import HfHubHTTPError
  from jaxtyping import Float, Int
  from requests import HTTPError
- from safetensors.torch import save_file
- from tqdm import tqdm
+ from safetensors.torch import load_file, save_file
+ from tqdm.auto import tqdm
  from transformer_lens.hook_points import HookedRootModule
  from transformers import AutoTokenizer, PreTrainedTokenizerBase

@@ -24,7 +25,7 @@ from sae_lens.config import (
      HfDataset,
      LanguageModelSAERunnerConfig,
  )
- from sae_lens.constants import DTYPE_MAP
+ from sae_lens.constants import ACTIVATIONS_STORE_STATE_FILENAME, DTYPE_MAP
  from sae_lens.pretokenize_runner import get_special_token_from_cfg
  from sae_lens.saes.sae import SAE, T_SAE_CONFIG, T_TRAINING_SAE_CONFIG
  from sae_lens.tokenization_and_batching import concat_and_batch_sequences
@@ -729,6 +730,48 @@ class ActivationsStore:
          """save the state dict to a file in safetensors format"""
          save_file(self.state_dict(), file_path)

+     def save_to_checkpoint(self, checkpoint_path: str | Path):
+         """Save the state dict to a checkpoint path"""
+         self.save(str(Path(checkpoint_path) / ACTIVATIONS_STORE_STATE_FILENAME))
+
+     def load_from_checkpoint(self, checkpoint_path: str | Path):
+         """Load the state dict from a checkpoint path"""
+         self.load(str(Path(checkpoint_path) / ACTIVATIONS_STORE_STATE_FILENAME))
+
+     def load(self, file_path: str):
+         """Load the state dict from a file in safetensors format"""
+
+         state_dict = load_file(file_path)
+
+         if "n_dataset_processed" in state_dict:
+             target_n_dataset_processed = state_dict["n_dataset_processed"].item()
+
+             # Only fast-forward if needed
+
+             if target_n_dataset_processed > self.n_dataset_processed:
+                 logger.info(
+                     "Fast-forwarding through dataset samples to match checkpoint position"
+                 )
+                 samples_to_skip = target_n_dataset_processed - self.n_dataset_processed
+
+                 pbar = tqdm(
+                     total=samples_to_skip,
+                     desc="Fast-forwarding through dataset",
+                     leave=False,
+                 )
+                 while target_n_dataset_processed > self.n_dataset_processed:
+                     start = self.n_dataset_processed
+                     try:
+                         # Just consume and ignore the values to fast-forward
+                         next(self.iterable_sequences)
+                     except StopIteration:
+                         logger.warning(
+                             "Dataset exhausted during fast-forward. Resetting dataset."
+                         )
+                         self.iterable_sequences = self._iterate_tokenized_sequences()
+                     pbar.update(self.n_dataset_processed - start)
+                 pbar.close()
+

  def validate_pretokenized_dataset_tokenizer(
      dataset_path: str, model_tokenizer: PreTrainedTokenizerBase
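On top of its existing safetensors `save`, the store now has checkpoint-level helpers, and `load` fast-forwards the tokenized-sequence iterator until `n_dataset_processed` matches the checkpoint rather than replaying raw activations. A sketch, assuming `store` is an existing `ActivationsStore` (for example `runner.activations_store`) and the directory is a placeholder:

```python
from pathlib import Path

ckpt_dir = Path("checkpoints/<run_id>/<n_training_samples>")  # hypothetical path

# During training: persist the store position alongside the other checkpoint files.
store.save_to_checkpoint(ckpt_dir)  # writes activations_store_state.safetensors

# When resuming: reload the state and fast-forward the dataset iterator.
store.load_from_checkpoint(ckpt_dir)
```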
sae_lens/training/optim.py CHANGED
@@ -2,6 +2,8 @@
  Took the LR scheduler from my previous work: https://github.com/jbloomAus/DecisionTransformerInterpretability/blob/ee55df35cdb92e81d689c72fb9dd5a7252893363/src/decision_transformer/utils.py#L425
  """

+ from typing import Any
+
  import torch.optim as optim
  import torch.optim.lr_scheduler as lr_scheduler

@@ -150,3 +152,12 @@ class CoefficientScheduler:
      def value(self) -> float:
          """Returns the current scalar value."""
          return self.current_value
+
+     def state_dict(self) -> dict[str, Any]:
+         return {
+             "current_step": self.current_step,
+         }
+
+     def load_state_dict(self, state_dict: dict[str, Any]):
+         for k in state_dict:
+             setattr(self, k, state_dict[k])
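`CoefficientScheduler` now exposes a minimal `state_dict`/`load_state_dict` pair (only `current_step` is persisted), which is what the trainer bundles under `coefficient_schedulers`. A sketch on scheduler instances that are assumed to already exist, since the constructor is not shown in this diff:

```python
# `scheduler` and `fresh_scheduler` are assumed to be CoefficientScheduler instances
# created with identical settings (e.g. by the trainer on the original and resumed runs).
saved = scheduler.state_dict()          # {"current_step": ...}
fresh_scheduler.load_state_dict(saved)  # restores current_step via setattr
```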
sae_lens/training/sae_trainer.py CHANGED
@@ -1,4 +1,5 @@
  import contextlib
+ import math
  from pathlib import Path
  from typing import Any, Callable, Generic, Protocol

@@ -10,7 +11,11 @@ from tqdm.auto import tqdm

  from sae_lens import __version__
  from sae_lens.config import SAETrainerConfig
- from sae_lens.constants import ACTIVATION_SCALER_CFG_FILENAME, SPARSITY_FILENAME
+ from sae_lens.constants import (
+     ACTIVATION_SCALER_CFG_FILENAME,
+     SPARSITY_FILENAME,
+     TRAINER_STATE_FILENAME,
+ )
  from sae_lens.saes.sae import (
      T_TRAINING_SAE,
      T_TRAINING_SAE_CONFIG,
@@ -56,6 +61,7 @@ class SAETrainer(Generic[T_TRAINING_SAE, T_TRAINING_SAE_CONFIG]):
      data_provider: DataProvider
      activation_scaler: ActivationScaler
      evaluator: Evaluator[T_TRAINING_SAE] | None
+     coefficient_schedulers: dict[str, CoefficientScheduler]

      def __init__(
          self,
@@ -84,7 +90,9 @@ class SAETrainer(Generic[T_TRAINING_SAE, T_TRAINING_SAE_CONFIG]):
              range(
                  0,
                  cfg.total_training_samples,
-                 cfg.total_training_samples // self.cfg.n_checkpoints,
+                 math.ceil(
+                     cfg.total_training_samples / (self.cfg.n_checkpoints + 1)
+                 ),
              )
          )[1:]

@@ -93,11 +101,6 @@ class SAETrainer(Generic[T_TRAINING_SAE, T_TRAINING_SAE_CONFIG]):
              sae.cfg.d_sae, device=cfg.device
          )
          self.n_frac_active_samples = 0
-         # we don't train the scaling factor (initially)
-         # set requires grad to false for the scaling factor
-         for name, param in self.sae.named_parameters():
-             if "scaling_factor" in name:
-                 param.requires_grad = False

          self.optimizer = Adam(
              sae.parameters(),
@@ -210,10 +213,7 @@ class SAETrainer(Generic[T_TRAINING_SAE, T_TRAINING_SAE_CONFIG]):
              sparsity_path = checkpoint_path / SPARSITY_FILENAME
              save_file({"sparsity": self.log_feature_sparsity}, sparsity_path)

-             activation_scaler_path = (
-                 checkpoint_path / ACTIVATION_SCALER_CFG_FILENAME
-             )
-             self.activation_scaler.save(str(activation_scaler_path))
+             self.save_trainer_state(checkpoint_path)

          if self.cfg.logger.log_to_wandb:
              self.cfg.logger.log(
@@ -227,6 +227,44 @@ class SAETrainer(Generic[T_TRAINING_SAE, T_TRAINING_SAE_CONFIG]):
          if self.save_checkpoint_fn is not None:
              self.save_checkpoint_fn(checkpoint_path=checkpoint_path)

+     def save_trainer_state(self, checkpoint_path: Path) -> None:
+         checkpoint_path.mkdir(exist_ok=True, parents=True)
+         scheduler_state_dicts = {
+             name: scheduler.state_dict()
+             for name, scheduler in self.coefficient_schedulers.items()
+         }
+         torch.save(
+             {
+                 "optimizer": self.optimizer.state_dict(),
+                 "lr_scheduler": self.lr_scheduler.state_dict(),
+                 "n_training_samples": self.n_training_samples,
+                 "n_training_steps": self.n_training_steps,
+                 "act_freq_scores": self.act_freq_scores,
+                 "n_forward_passes_since_fired": self.n_forward_passes_since_fired,
+                 "n_frac_active_samples": self.n_frac_active_samples,
+                 "started_fine_tuning": self.started_fine_tuning,
+                 "coefficient_schedulers": scheduler_state_dicts,
+             },
+             str(checkpoint_path / TRAINER_STATE_FILENAME),
+         )
+         activation_scaler_path = checkpoint_path / ACTIVATION_SCALER_CFG_FILENAME
+         self.activation_scaler.save(str(activation_scaler_path))
+
+     def load_trainer_state(self, checkpoint_path: Path | str) -> None:
+         checkpoint_path = Path(checkpoint_path)
+         self.activation_scaler.load(checkpoint_path / ACTIVATION_SCALER_CFG_FILENAME)
+         state_dict = torch.load(checkpoint_path / TRAINER_STATE_FILENAME)
+         self.optimizer.load_state_dict(state_dict["optimizer"])
+         self.lr_scheduler.load_state_dict(state_dict["lr_scheduler"])
+         self.n_training_samples = state_dict["n_training_samples"]
+         self.n_training_steps = state_dict["n_training_steps"]
+         self.act_freq_scores = state_dict["act_freq_scores"]
+         self.n_forward_passes_since_fired = state_dict["n_forward_passes_since_fired"]
+         self.n_frac_active_samples = state_dict["n_frac_active_samples"]
+         self.started_fine_tuning = state_dict["started_fine_tuning"]
+         for name, scheduler_state_dict in state_dict["coefficient_schedulers"].items():
+             self.coefficient_schedulers[name].load_state_dict(scheduler_state_dict)
+
      def _train_step(
          self,
          sae: T_TRAINING_SAE,
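Trainer state (optimizer, LR scheduler, step counters, feature-frequency buffers, and coefficient schedulers) is now bundled into `trainer_state.pt` next to `activation_scaler.json`. A sketch of the round-trip, assuming `trainer` is an existing `SAETrainer`; the runner calls `load_trainer_state` for you when `resume_from_checkpoint` is set:

```python
from pathlib import Path

ckpt_dir = Path("checkpoints/<run_id>/<n_training_samples>")  # hypothetical path

# Saving: also done automatically at each checkpoint threshold.
trainer.save_trainer_state(ckpt_dir)  # writes trainer_state.pt + activation_scaler.json

# Resuming: restores optimizer, lr_scheduler, counters, and coefficient schedulers.
trainer.load_trainer_state(ckpt_dir)
```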
sae_lens-6.16.3.dist-info/METADATA → sae_lens-6.17.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sae-lens
- Version: 6.16.3
+ Version: 6.17.0
  Summary: Training and Analyzing Sparse Autoencoders (SAEs)
  License: MIT
  License-File: LICENSE
sae_lens-6.16.3.dist-info/RECORD → sae_lens-6.17.0.dist-info/RECORD RENAMED
@@ -1,12 +1,12 @@
- sae_lens/__init__.py,sha256=c1rxG64QCdP4n1LI8Du_dxEn30E1fXXbEfZ0kaZ2JiI,3886
+ sae_lens/__init__.py,sha256=gwdputTF_w4CT5j_4kN-DzwDI_EHHrrMmkub75qjOuc,3886
  sae_lens/analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sae_lens/analysis/hooked_sae_transformer.py,sha256=vRu6JseH1lZaEeILD5bEkQEQ1wYHHDcxD-f2olKmE9Y,14275
  sae_lens/analysis/neuronpedia_integration.py,sha256=Gx1W7hUBEuMoasNcnOnZ1wmqbXDd1pSZ1nqKEya1HQc,4962
- sae_lens/cache_activations_runner.py,sha256=cNeAtp2JQ_vKbeddZVM-tcPLYyyfTWL8NDna5KQpkLI,12583
- sae_lens/config.py,sha256=IdRXSKPfYY3hwUovj-u83eep8z52gkJHII0mY0KseYY,28739
- sae_lens/constants.py,sha256=CSjmiZ-bhjQeVLyRvWxAjBokCgkfM8mnvd7-vxLIWTY,639
+ sae_lens/cache_activations_runner.py,sha256=KFR5SHZt_fy3iLBtHd_aBG-fskAqEtRE-7A-nqy1L-w,12588
+ sae_lens/config.py,sha256=YCovmjivhsnBCFggC_kneEgxCwMJtDg-1jZwZuzuL30,28901
+ sae_lens/constants.py,sha256=qX12uAE_xkha6hjss_0MGTbakI7gEkJzHABkZaHWQFU,683
  sae_lens/evals.py,sha256=P0NUsJeGzYxFBiVKhbPzd72IFKY4gH40HHlEZ3jEAmg,39598
- sae_lens/llm_sae_training_runner.py,sha256=UHRcLqvtnORsZ7u7ymbrv-Ib2BD84czHBvu03jNbtcE,14834
+ sae_lens/llm_sae_training_runner.py,sha256=M7BK55gSFYu2qFQKABHX3c8i46P1LfODCeyHFzGGuqU,15196
  sae_lens/load_model.py,sha256=C8AMykctj6H7tz_xRwB06-EXj6TfW64PtSJZR5Jxn1Y,8649
  sae_lens/loading/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sae_lens/loading/pretrained_sae_loaders.py,sha256=SM4aT8NM6ezYix5c2u7p72Fz2RfvTtf7gw5RdOSKXhc,49846
@@ -19,22 +19,22 @@ sae_lens/saes/batchtopk_sae.py,sha256=x4EbgZl0GUickRPcCmtKNGS2Ra3Uy1Z1OtF2FnrSab
  sae_lens/saes/gated_sae.py,sha256=qcmM9JwBA8aZR8z_IRHV1_gQX-q_63tKewWXRnhdXuo,8986
  sae_lens/saes/jumprelu_sae.py,sha256=HHBF1sJ95lZvxwP5vwLSQFKdnJN2KKYK0WAEaLTrta0,13399
  sae_lens/saes/matryoshka_batchtopk_sae.py,sha256=4_1cVaxk6c6jgJEbxqebtG-cjQNIzaMAfjSPGfR7_VU,6062
- sae_lens/saes/sae.py,sha256=vABlwyZ0JtL896xxBGIoqfiByoszIf-e4ggPgz34RL0,38300
+ sae_lens/saes/sae.py,sha256=busKFD-XgQU2zo5h34UEXRCYKshEYxUIvtkU28CGdXc,38609
  sae_lens/saes/standard_sae.py,sha256=9UqYyYtQuThYxXKNaDjYcyowpOx2-7cShG-TeUP6JCQ,5940
  sae_lens/saes/topk_sae.py,sha256=tzQM5eQFifMe--8_8NUBYWY7hpjQa6A_olNe6U71FE8,21275
  sae_lens/saes/transcoder.py,sha256=BfLSbTYVNZh-ruGxseZiZJ_acEL6_7QyTdfqUr0lDOg,12156
  sae_lens/tokenization_and_batching.py,sha256=D_o7cXvRqhT89H3wNzoRymNALNE6eHojBWLdXOUwUGE,5438
  sae_lens/training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sae_lens/training/activation_scaler.py,sha256=seEE-2Qd2JMHxqgnsNWPt-DGtYGZxWPnOwCGuVNSOtI,1719
- sae_lens/training/activations_store.py,sha256=hHY6rW-T7sLq2a8JPEyWdm8leuIRm_MsObZs3jRTZmE,31931
+ sae_lens/training/activation_scaler.py,sha256=FzNfgBplLWmyiSlZ6TUvE-nur3lOiGTrlvC97ys8S24,1973
+ sae_lens/training/activations_store.py,sha256=vRte4-MrANEfPreTDJNdWazar2jNdD7h1-BxciBqlyk,33876
  sae_lens/training/mixing_buffer.py,sha256=vDpYG5ZE70szDvBsRKcNHEES3h_WTKJ16qDYk5jPOVA,2015
- sae_lens/training/optim.py,sha256=TiI9nbffzXNsI8WjcIsqa2uheW6suxqL_KDDmWXobWI,5312
- sae_lens/training/sae_trainer.py,sha256=ESA8FjHmIsLSgU-p5zUDtOkhzLEMFAijrWDmcYow6Rs,15693
+ sae_lens/training/optim.py,sha256=bJpqqcK4enkcPvQAJkeH4Ci1LUOlfjIMTv6-IlaAbRA,5588
+ sae_lens/training/sae_trainer.py,sha256=zhkabyIKxI_tZTV3_kwz6zMrHZ95Ecr97krmwc-9ffs,17600
  sae_lens/training/types.py,sha256=1FpLx_Doda9vZpmfm-x1e8wGBYpyhe9Kpb_JuM5nIFM,90
  sae_lens/training/upload_saes_to_huggingface.py,sha256=r_WzI1zLtGZ5TzAxuG3xa_8T09j3zXJrWd_vzPsPGkQ,4469
  sae_lens/tutorial/tsea.py,sha256=fd1am_XXsf2KMbByDapJo-2qlxduKaa62Z2qcQZ3QKU,18145
  sae_lens/util.py,sha256=tCovQ-eZa1L7thPpNDL6PGOJrIMML2yLI5e0EHCOpS8,3309
- sae_lens-6.16.3.dist-info/METADATA,sha256=sZBPyQRO8rpyB21LtAfOtfxiXhBQ7RiG8F3IluD_8mw,5318
- sae_lens-6.16.3.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
- sae_lens-6.16.3.dist-info/licenses/LICENSE,sha256=DW6e-hDosiu4CfW0-imI57sV1I5f9UEslpviNQcOAKs,1069
- sae_lens-6.16.3.dist-info/RECORD,,
+ sae_lens-6.17.0.dist-info/METADATA,sha256=QpmfC6-EhWZ0-dOPZ8uRWMVEcQ11dmTzjzLNrS7gxhY,5318
+ sae_lens-6.17.0.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+ sae_lens-6.17.0.dist-info/licenses/LICENSE,sha256=DW6e-hDosiu4CfW0-imI57sV1I5f9UEslpviNQcOAKs,1069
+ sae_lens-6.17.0.dist-info/RECORD,,