sae-lens 6.7.0__py3-none-any.whl → 6.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sae_lens/__init__.py CHANGED
@@ -1,5 +1,5 @@
  # ruff: noqa: E402
- __version__ = "6.7.0"
+ __version__ = "6.9.0"

  import logging

sae_lens/analysis/neuronpedia_integration.py CHANGED
@@ -1,14 +1,10 @@
- import asyncio
  import json
- import os
  import urllib.parse
  import webbrowser
- from datetime import datetime
- from typing import Any, TypeVar
+ from typing import Any

  import requests
  from dotenv import load_dotenv
- from tenacity import retry, stop_after_attempt, wait_random_exponential

  from sae_lens import SAE, logger

@@ -126,47 +122,6 @@ class NeuronpediaFeature:
          return any(max(activation.act_values) > 0 for activation in self.activations)


- T = TypeVar("T")
-
-
- @retry(wait=wait_random_exponential(min=1, max=500), stop=stop_after_attempt(10))
- def sleep_identity(x: T) -> T:
-     """Dummy function for retrying."""
-     return x
-
-
- @retry(wait=wait_random_exponential(min=1, max=500), stop=stop_after_attempt(10))
- async def simulate_and_score(  # type: ignore
-     simulator: Any,
-     activation_records: list[Any],
- ) -> Any:
-     """Score an explanation of a neuron by how well it predicts activations on the given text sequences."""
-     try:
-         from neuron_explainer.explanations.scoring import (
-             _simulate_and_score_sequence,
-             aggregate_scored_sequence_simulations,
-         )
-     except ImportError as e:
-         raise ImportError(
-             "The neuron_explainer package is required to use this function. "
-             "Please install SAELens with the neuronpedia optional dependencies: "
-             "pip install sae-lens[neuronpedia]"
-         ) from e
-
-     scored_sequence_simulations = await asyncio.gather(
-         *[
-             sleep_identity(
-                 _simulate_and_score_sequence(
-                     simulator,
-                     activation_record,
-                 )
-             )
-             for activation_record in activation_records
-         ]
-     )
-     return aggregate_scored_sequence_simulations(scored_sequence_simulations)
-
-
  def make_neuronpedia_list_with_features(
      api_key: str,
      list_name: str,
@@ -206,305 +161,3 @@ def test_key(api_key: str):
      response = requests.post(url, json=body)
      if response.status_code != 200:
          raise Exception("Neuronpedia API key is not valid.")
-
-
- async def autointerp_neuronpedia_features(  # noqa: C901
-     features: list[NeuronpediaFeature],
-     openai_api_key: str | None = None,
-     autointerp_retry_attempts: int = 3,
-     autointerp_score_max_concurrent: int = 20,
-     neuronpedia_api_key: str | None = None,
-     skip_neuronpedia_api_key_test: bool = False,
-     do_score: bool = True,
-     output_dir: str = "neuronpedia_outputs/autointerp",
-     num_activations_to_use: int = 20,
-     max_explanation_activation_records: int = 20,
-     upload_to_neuronpedia: bool = True,
-     autointerp_explainer_model_name: str = "gpt-4-1106-preview",
-     autointerp_scorer_model_name: str | None = "gpt-3.5-turbo",
-     save_to_disk: bool = True,
- ):
-     """
-     Autointerp Neuronpedia features.
-
-     Args:
-         features: List of NeuronpediaFeature objects.
-         openai_api_key: OpenAI API key.
-         autointerp_retry_attempts: Number of retry attempts for autointerp.
-         autointerp_score_max_concurrent: Maximum number of concurrent requests for autointerp scoring.
-         neuronpedia_api_key: Neuronpedia API key.
-         do_score: Whether to score the features.
-         output_dir: Output directory for saving the results.
-         num_activations_to_use: Number of activations to use.
-         max_explanation_activation_records: Maximum number of activation records for explanation.
-         upload_to_neuronpedia: Whether to upload the results to Neuronpedia.
-         autointerp_explainer_model_name: Model name for autointerp explainer.
-         autointerp_scorer_model_name: Model name for autointerp scorer.
-
-     Returns:
-         None
-     """
-     try:
-         from neuron_explainer.activations.activation_records import (
-             calculate_max_activation,
-         )
-         from neuron_explainer.activations.activations import ActivationRecord
-         from neuron_explainer.explanations.calibrated_simulator import (
-             UncalibratedNeuronSimulator,
-         )
-         from neuron_explainer.explanations.explainer import (
-             HARMONY_V4_MODELS,
-             ContextSize,
-             TokenActivationPairExplainer,
-         )
-         from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
-         from neuron_explainer.explanations.prompt_builder import PromptFormat
-         from neuron_explainer.explanations.simulator import (
-             LogprobFreeExplanationTokenSimulator,
-         )
-     except ImportError as e:
-         raise ImportError(
-             "The automated-interpretability package is required to use autointerp functionality. "
-             "Please install SAELens with the neuronpedia optional dependencies: "
-             "pip install sae-lens[neuronpedia]"
-         ) from e
-
-     logger.info("\n\n")
-
-     if os.getenv("OPENAI_API_KEY") is None:
-         if openai_api_key is None:
-             raise Exception(
-                 "You need to provide an OpenAI API key either in environment variable OPENAI_API_KEY or as an argument."
-             )
-         os.environ["OPENAI_API_KEY"] = openai_api_key
-
-     if autointerp_explainer_model_name not in HARMONY_V4_MODELS:
-         raise Exception(
-             f"Invalid explainer model name: {autointerp_explainer_model_name}. Must be one of: {HARMONY_V4_MODELS}"
-         )
-
-     if do_score and autointerp_scorer_model_name not in HARMONY_V4_MODELS:
-         raise Exception(
-             f"Invalid scorer model name: {autointerp_scorer_model_name}. Must be one of: {HARMONY_V4_MODELS}"
-         )
-
-     if upload_to_neuronpedia:
-         if neuronpedia_api_key is None:
-             raise Exception(
-                 "You need to provide a Neuronpedia API key to upload the results to Neuronpedia."
-             )
-         if not skip_neuronpedia_api_key_test:
-             test_key(neuronpedia_api_key)
-
-     logger.info("\n\n=== Step 1) Fetching features from Neuronpedia")
-     for feature in features:
-         feature_data = get_neuronpedia_feature(
-             feature=feature.feature,
-             layer=feature.layer,
-             model=feature.modelId,
-             dataset=feature.dataset,
-         )
-
-         if "modelId" not in feature_data:
-             raise Exception(
-                 f"Feature {feature.feature} in layer {feature.layer} of model {feature.modelId} and dataset {feature.dataset} does not exist."
-             )
-
-         if "activations" not in feature_data or len(feature_data["activations"]) == 0:
-             raise Exception(
-                 f"Feature {feature.feature} in layer {feature.layer} of model {feature.modelId} and dataset {feature.dataset} does not have activations."
-             )
-
-         activations = feature_data["activations"]
-         activations_to_add = []
-         for activation in activations:
-             if len(activations_to_add) < num_activations_to_use:
-                 activations_to_add.append(
-                     NeuronpediaActivation(
-                         id=activation["id"],
-                         tokens=activation["tokens"],
-                         act_values=activation["values"],
-                     )
-                 )
-         feature.activations = activations_to_add
-
-         if not feature.has_activating_text():
-             raise Exception(
-                 f"Feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature} appears dead - it does not have activating text."
-             )
-
-     for iteration_num, feature in enumerate(features):
-         start_time = datetime.now()
-
-         logger.info(
-             f"\n========== Feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature} ({iteration_num + 1} of {len(features)} Features) =========="
-         )
-         logger.info(
-             f"\n=== Step 2) Explaining feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}"
-         )
-
-         if feature.activations is None:
-             feature.activations = []
-         activation_records = [
-             ActivationRecord(
-                 tokens=activation.tokens,  # type: ignore
-                 activations=activation.act_values,  # type: ignore
-             )  # type: ignore
-             for activation in feature.activations
-         ]
-
-         activation_records_explaining = activation_records[
-             :max_explanation_activation_records
-         ]
-
-         explainer = TokenActivationPairExplainer(
-             model_name=autointerp_explainer_model_name,
-             prompt_format=PromptFormat.HARMONY_V4,
-             context_size=ContextSize.SIXTEEN_K,
-             max_concurrent=1,
-         )
-
-         explanations = []
-         for _ in range(autointerp_retry_attempts):
-             try:
-                 explanations = await explainer.generate_explanations(
-                     all_activation_records=activation_records_explaining,
-                     max_activation=calculate_max_activation(
-                         activation_records_explaining
-                     ),
-                     num_samples=1,
-                 )
-             except Exception as e:
-                 logger.error(f"ERROR, RETRYING: {e}")
-             else:
-                 break
-         else:
-             logger.error(
-                 f"ERROR: Failed to explain feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}"
-             )
-
-         if len(explanations) != 1:
-             raise ValueError(
-                 f"Expected exactly one explanation but got {len(explanations)}. This may indicate an issue with the explainer's response."
-             )
-         explanation = explanations[0].rstrip(".")
-         logger.info(
-             f"===== {autointerp_explainer_model_name}'s explanation: {explanation}"
-         )
-         feature.autointerp_explanation = explanation
-
-         scored_simulation = None
-         if do_score and autointerp_scorer_model_name:
-             logger.info(
-                 f"\n=== Step 3) Scoring feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}"
-             )
-             logger.info("=== This can take up to 30 seconds.")
-
-             temp_activation_records = [
-                 ActivationRecord(
-                     tokens=[  # type: ignore
-                         token.replace("<|endoftext|>", "<|not_endoftext|>")
-                         .replace(" 55", "_55")
-                         .encode("ascii", errors="backslashreplace")
-                         .decode("ascii")
-                         for token in activation_record.tokens  # type: ignore
-                     ],
-                     activations=activation_record.activations,  # type: ignore
-                 )  # type: ignore
-                 for activation_record in activation_records
-             ]
-
-             score = None
-             scored_simulation = None
-             for _ in range(autointerp_retry_attempts):
-                 try:
-                     simulator = UncalibratedNeuronSimulator(
-                         LogprobFreeExplanationTokenSimulator(
-                             autointerp_scorer_model_name,
-                             explanation,
-                             json_mode=True,
-                             max_concurrent=autointerp_score_max_concurrent,
-                             few_shot_example_set=FewShotExampleSet.JL_FINE_TUNED,
-                             prompt_format=PromptFormat.HARMONY_V4,
-                         )
-                     )
-                     scored_simulation = await simulate_and_score(
-                         simulator, temp_activation_records
-                     )
-                     score = scored_simulation.get_preferred_score()
-                 except Exception as e:
-                     logger.error(f"ERROR, RETRYING: {e}")
-                 else:
-                     break
-
-             if (
-                 score is None
-                 or scored_simulation is None
-                 or len(scored_simulation.scored_sequence_simulations)
-                 != num_activations_to_use
-             ):
-                 logger.error(
-                     f"ERROR: Failed to score feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}. Skipping it."
-                 )
-                 continue
-             feature.autointerp_explanation_score = score
-             logger.info(
-                 f"===== {autointerp_scorer_model_name}'s score: {(score * 100):.0f}"
-             )
-
-         else:
-             logger.info("=== Step 3) Skipping scoring as instructed.")
-
-         feature_data = {
-             "modelId": feature.modelId,
-             "layer": f"{feature.layer}-{feature.dataset}",
-             "index": feature.feature,
-             "explanation": feature.autointerp_explanation,
-             "explanationScore": feature.autointerp_explanation_score,
-             "explanationModel": autointerp_explainer_model_name,
-         }
-         if do_score and autointerp_scorer_model_name and scored_simulation:
-             feature_data["activations"] = feature.activations
-             feature_data["simulationModel"] = autointerp_scorer_model_name
-             feature_data["simulationActivations"] = (
-                 scored_simulation.scored_sequence_simulations
-             )  # type: ignore
-             feature_data["simulationScore"] = feature.autointerp_explanation_score
-         feature_data_str = json.dumps(feature_data, default=vars)
-
-         if save_to_disk:
-             output_file = f"{output_dir}/{feature.modelId}-{feature.layer}-{feature.dataset}_feature-{feature.feature}_time-{datetime.now().strftime('%Y%m%d-%H%M%S')}.jsonl"
-             os.makedirs(output_dir, exist_ok=True)
-             logger.info(f"\n=== Step 4) Saving feature to {output_file}")
-             with open(output_file, "a") as f:
-                 f.write(feature_data_str)
-                 f.write("\n")
-         else:
-             logger.info("\n=== Step 4) Skipping saving to disk.")
-
-         if upload_to_neuronpedia:
-             logger.info("\n=== Step 5) Uploading feature to Neuronpedia")
-             upload_data = json.dumps(
-                 {
-                     "feature": feature_data,
-                 },
-                 default=vars,
-             )
-             upload_data_json = json.loads(upload_data, parse_constant=NanAndInfReplacer)
-             url = f"{NEURONPEDIA_DOMAIN}/api/explanation/new"
-             response = requests.post(
-                 url, json=upload_data_json, headers={"x-api-key": neuronpedia_api_key}
-             )
-             if response.status_code != 200:
-                 logger.error(
-                     f"ERROR: Couldn't upload explanation to Neuronpedia: {response.text}"
-                 )
-             else:
-                 logger.info(
-                     f"===== Uploaded to Neuronpedia: {NEURONPEDIA_DOMAIN}/{feature.modelId}/{feature.layer}-{feature.dataset}/{feature.feature}"
-                 )
-
-         end_time = datetime.now()
-         logger.info(f"\n========== Time Spent for Feature: {end_time - start_time}\n")
-
-     logger.info("\n\n========== Generation and Upload Complete ==========\n\n")
sae_lens/pretrained_saes.yaml CHANGED
@@ -14744,4 +14744,88 @@ mwhanna-qwen3-0.6b-transcoders-lowl0:
      neuronpedia: qwen3-0.6b/26-transcoder-hp-lowl0
    - id: layer_27
      path: layer_27.safetensors
-     neuronpedia: qwen3-0.6b/27-transcoder-hp-lowl0
+     neuronpedia: qwen3-0.6b/27-transcoder-hp-lowl0
+
+ mntss-gemma-2-2b-2.5m-clt-as-per-layer:
+   conversion_func: mntss_clt_layer_transcoder
+   model: gemma-2-2b
+   repo_id: mntss/clt-gemma-2-2b-2.5M
+   saes:
+   - id: layer_0
+     path: 0
+     neuronpedia: gemma-2-2b/0-clt-hp
+   - id: layer_1
+     path: 1
+     neuronpedia: gemma-2-2b/1-clt-hp
+   - id: layer_2
+     path: 2
+     neuronpedia: gemma-2-2b/2-clt-hp
+   - id: layer_3
+     path: 3
+     neuronpedia: gemma-2-2b/3-clt-hp
+   - id: layer_4
+     path: 4
+     neuronpedia: gemma-2-2b/4-clt-hp
+   - id: layer_5
+     path: 5
+     neuronpedia: gemma-2-2b/5-clt-hp
+   - id: layer_6
+     path: 6
+     neuronpedia: gemma-2-2b/6-clt-hp
+   - id: layer_7
+     path: 7
+     neuronpedia: gemma-2-2b/7-clt-hp
+   - id: layer_8
+     path: 8
+     neuronpedia: gemma-2-2b/8-clt-hp
+   - id: layer_9
+     path: 9
+     neuronpedia: gemma-2-2b/9-clt-hp
+   - id: layer_10
+     path: 10
+     neuronpedia: gemma-2-2b/10-clt-hp
+   - id: layer_11
+     path: 11
+     neuronpedia: gemma-2-2b/11-clt-hp
+   - id: layer_12
+     path: 12
+     neuronpedia: gemma-2-2b/12-clt-hp
+   - id: layer_13
+     path: 13
+     neuronpedia: gemma-2-2b/13-clt-hp
+   - id: layer_14
+     path: 14
+     neuronpedia: gemma-2-2b/14-clt-hp
+   - id: layer_15
+     path: 15
+     neuronpedia: gemma-2-2b/15-clt-hp
+   - id: layer_16
+     path: 16
+     neuronpedia: gemma-2-2b/16-clt-hp
+   - id: layer_17
+     path: 17
+     neuronpedia: gemma-2-2b/17-clt-hp
+   - id: layer_18
+     path: 18
+     neuronpedia: gemma-2-2b/18-clt-hp
+   - id: layer_19
+     path: 19
+     neuronpedia: gemma-2-2b/19-clt-hp
+   - id: layer_20
+     path: 20
+     neuronpedia: gemma-2-2b/20-clt-hp
+   - id: layer_21
+     path: 21
+     neuronpedia: gemma-2-2b/21-clt-hp
+   - id: layer_22
+     path: 22
+     neuronpedia: gemma-2-2b/22-clt-hp
+   - id: layer_23
+     path: 23
+     neuronpedia: gemma-2-2b/23-clt-hp
+   - id: layer_24
+     path: 24
+     neuronpedia: gemma-2-2b/24-clt-hp
+   - id: layer_25
+     path: 25
+     neuronpedia: gemma-2-2b/25-clt-hp
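The block above registers a new pretrained release, mntss-gemma-2-2b-2.5m-clt-as-per-layer, exposing per-layer CLT transcoders for gemma-2-2b through the pretrained-SAE directory. A rough usage sketch, assuming the 6.x SAE.from_pretrained(release, sae_id, device) signature (earlier major versions returned a tuple rather than a bare SAE):

from sae_lens import SAE

# Load one of the per-layer transcoders added in this release; the release
# and sae_id strings come straight from the pretrained_saes.yaml entries above.
sae = SAE.from_pretrained(
    release="mntss-gemma-2-2b-2.5m-clt-as-per-layer",
    sae_id="layer_0",
    device="cpu",
)
print(sae.cfg)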
{sae_lens-6.7.0.dist-info → sae_lens-6.9.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: sae-lens
- Version: 6.7.0
+ Version: 6.9.0
  Summary: Training and Analyzing Sparse Autoencoders (SAEs)
  License: MIT
  Keywords: deep-learning,sparse-autoencoders,mechanistic-interpretability,PyTorch
@@ -14,8 +14,6 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Provides-Extra: mamba
- Provides-Extra: neuronpedia
- Requires-Dist: automated-interpretability (>=0.0.5,<1.0.0) ; extra == "neuronpedia"
  Requires-Dist: babe (>=0.0.7,<0.0.8)
  Requires-Dist: datasets (>=3.1.0)
  Requires-Dist: mamba-lens (>=0.0.4,<0.0.5) ; extra == "mamba"
{sae_lens-6.7.0.dist-info → sae_lens-6.9.0.dist-info}/RECORD RENAMED
@@ -1,7 +1,7 @@
- sae_lens/__init__.py,sha256=JT234I0wGYcCdf_zNYt-22puv4Tjpz9tuftPnr6SXqM,3588
+ sae_lens/__init__.py,sha256=TQtVpMEI_qo11x4o5w2cfOF9pqHeXXtyMCypr3lSRCI,3588
  sae_lens/analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sae_lens/analysis/hooked_sae_transformer.py,sha256=vRu6JseH1lZaEeILD5bEkQEQ1wYHHDcxD-f2olKmE9Y,14275
- sae_lens/analysis/neuronpedia_integration.py,sha256=Fj4gVyaXMGBUxoK0vPeTwGVFr4n40fmfPrRENo4WzPs,19324
+ sae_lens/analysis/neuronpedia_integration.py,sha256=Gx1W7hUBEuMoasNcnOnZ1wmqbXDd1pSZ1nqKEya1HQc,4962
  sae_lens/cache_activations_runner.py,sha256=cNeAtp2JQ_vKbeddZVM-tcPLYyyfTWL8NDna5KQpkLI,12583
  sae_lens/config.py,sha256=IrjbsKBbaZoFXYrsPJ5xBwIqi9uZJIIFXjV_uoErJaE,28176
  sae_lens/constants.py,sha256=CSjmiZ-bhjQeVLyRvWxAjBokCgkfM8mnvd7-vxLIWTY,639
@@ -12,7 +12,7 @@ sae_lens/loading/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  sae_lens/loading/pretrained_sae_loaders.py,sha256=IVtgxWN0w96ZORnWPYW2ndYWey7e5GpzlpedWF3NJ8k,46818
  sae_lens/loading/pretrained_saes_directory.py,sha256=4Vn-Jex6SveD7EbxcSOBv8cx1gkPfUMLU1QOP-ww1ZE,3752
  sae_lens/pretokenize_runner.py,sha256=w0f6SfZLAxbp5eAAKnet8RqUB_DKofZ9RGsoJwFnYbA,7058
- sae_lens/pretrained_saes.yaml,sha256=O_FwoOe7fU9_WLEOnMk1IWXRxD4nwzf1tCfbof1r0D0,598578
+ sae_lens/pretrained_saes.yaml,sha256=d6FYfWTdVAPlOCM55C1ICS6lF9nWPPVNwjlXCa9p7NU,600468
  sae_lens/registry.py,sha256=nhy7BPSudSATqW4lo9H_k3Na7sfGHmAf9v-3wpnLL_o,1490
  sae_lens/saes/__init__.py,sha256=jVwazK8Q6dW5J6_zFXPoNAuBvSxgziQ8eMOjGM3t-X8,1475
  sae_lens/saes/batchtopk_sae.py,sha256=CyaFG2hMyyDaEaXXrAMJC8wQDW1JoddTKF5mvxxBQKY,3395
@@ -33,7 +33,7 @@ sae_lens/training/types.py,sha256=qSjmGzXf3MLalygG0psnVjmhX_mpLmL47MQtZfe7qxg,81
  sae_lens/training/upload_saes_to_huggingface.py,sha256=r_WzI1zLtGZ5TzAxuG3xa_8T09j3zXJrWd_vzPsPGkQ,4469
  sae_lens/tutorial/tsea.py,sha256=fd1am_XXsf2KMbByDapJo-2qlxduKaa62Z2qcQZ3QKU,18145
  sae_lens/util.py,sha256=mCwLAilGMVo8Scm7CIsCafU7GsfmBvCcjwmloI4Ly7Y,1718
- sae_lens-6.7.0.dist-info/LICENSE,sha256=DW6e-hDosiu4CfW0-imI57sV1I5f9UEslpviNQcOAKs,1069
- sae_lens-6.7.0.dist-info/METADATA,sha256=K5tIuWNs4ho7aEsNfimzbInpxi-eX_hhRneAGw22xVE,5356
- sae_lens-6.7.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- sae_lens-6.7.0.dist-info/RECORD,,
+ sae_lens-6.9.0.dist-info/LICENSE,sha256=DW6e-hDosiu4CfW0-imI57sV1I5f9UEslpviNQcOAKs,1069
+ sae_lens-6.9.0.dist-info/METADATA,sha256=xqfc_6msZd2ZGEwnLFVBDAK2HdFrvllKwa78bx6iYhs,5244
+ sae_lens-6.9.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ sae_lens-6.9.0.dist-info/RECORD,,