sae-lens 6.6.2.tar.gz → 6.6.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sae_lens-6.6.2 → sae_lens-6.6.4}/PKG-INFO +3 -2
- {sae_lens-6.6.2 → sae_lens-6.6.4}/pyproject.toml +4 -2
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/__init__.py +1 -1
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/analysis/neuronpedia_integration.py +40 -24
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/loading/pretrained_sae_loaders.py +16 -5
- {sae_lens-6.6.2 → sae_lens-6.6.4}/LICENSE +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/README.md +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/analysis/__init__.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/analysis/hooked_sae_transformer.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/cache_activations_runner.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/config.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/constants.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/evals.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/llm_sae_training_runner.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/load_model.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/loading/__init__.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/loading/pretrained_saes_directory.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/pretokenize_runner.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/pretrained_saes.yaml +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/registry.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/__init__.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/batchtopk_sae.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/gated_sae.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/jumprelu_sae.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/sae.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/standard_sae.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/topk_sae.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/saes/transcoder.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/tokenization_and_batching.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/__init__.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/activation_scaler.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/activations_store.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/mixing_buffer.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/optim.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/sae_trainer.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/types.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/training/upload_saes_to_huggingface.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/tutorial/tsea.py +0 -0
- {sae_lens-6.6.2 → sae_lens-6.6.4}/sae_lens/util.py +0 -0
```diff
--- sae_lens-6.6.2/PKG-INFO
+++ sae_lens-6.6.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: sae-lens
-Version: 6.6.2
+Version: 6.6.4
 Summary: Training and Analyzing Sparse Autoencoders (SAEs)
 License: MIT
 Keywords: deep-learning,sparse-autoencoders,mechanistic-interpretability,PyTorch
@@ -14,7 +14,8 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Provides-Extra: mamba
-Requires-Dist: automated-interpretability (>=0.0.5,<1.0.0)
+Provides-Extra: neuronpedia
+Requires-Dist: automated-interpretability (>=0.0.5,<1.0.0) ; extra == "neuronpedia"
 Requires-Dist: babe (>=0.0.7,<0.0.8)
 Requires-Dist: datasets (>=3.1.0)
 Requires-Dist: mamba-lens (>=0.0.4,<0.0.5) ; extra == "mamba"
```
```diff
--- sae_lens-6.6.2/pyproject.toml
+++ sae_lens-6.6.4/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sae-lens"
-version = "6.6.2"
+version = "6.6.4"
 description = "Training and Analyzing Sparse Autoencoders (SAEs)"
 authors = ["Joseph Bloom"]
 readme = "README.md"
@@ -28,12 +28,12 @@ babe = "^0.0.7"
 nltk = "^3.8.1"
 safetensors = ">=0.4.2,<1.0.0"
 mamba-lens = { version = "^0.0.4", optional = true }
-automated-interpretability = ">=0.0.5,<1.0.0"
 python-dotenv = ">=1.0.1"
 pyyaml = "^6.0.1"
 typing-extensions = "^4.10.0"
 simple-parsing = "^0.1.6"
 tenacity = ">=9.0.0"
+automated-interpretability = { version = ">=0.0.5,<1.0.0", optional = true }
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^8.0.2"
@@ -56,9 +56,11 @@ ruff = "^0.7.4"
 eai-sparsify = "^1.1.1"
 mike = "^2.0.0"
 trio = "^0.30.0"
+automated-interpretability = "^0.0.13"
 
 [tool.poetry.extras]
 mamba = ["mamba-lens"]
+neuronpedia = ["automated-interpretability"]
 
 [tool.ruff.lint]
 exclude = ["*.ipynb"]
```
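The pyproject.toml change above moves `automated-interpretability` out of the required dependencies and behind a new `neuronpedia` extra (installable with `pip install "sae-lens[neuronpedia]"`), while adding a `^0.0.13` pin for the dev group. A minimal sketch of how downstream code could check for the extra at runtime; the helper name below is illustrative and not part of sae-lens:

```python
# Minimal sketch (not from the diff): detect whether the optional
# "neuronpedia" extra is installed before using autointerp features.
# `automated-interpretability` provides the `neuron_explainer` package,
# which sae-lens now imports lazily.
import importlib.util


def has_neuronpedia_extra() -> bool:
    return importlib.util.find_spec("neuron_explainer") is not None


if not has_neuronpedia_extra():
    print('Missing extra; install with: pip install "sae-lens[neuronpedia]"')
```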
```diff
--- sae_lens-6.6.2/sae_lens/analysis/neuronpedia_integration.py
+++ sae_lens-6.6.4/sae_lens/analysis/neuronpedia_integration.py
@@ -8,27 +8,6 @@ from typing import Any, TypeVar
 
 import requests
 from dotenv import load_dotenv
-from neuron_explainer.activations.activation_records import calculate_max_activation
-from neuron_explainer.activations.activations import ActivationRecord
-from neuron_explainer.explanations.calibrated_simulator import (
-    UncalibratedNeuronSimulator,
-)
-from neuron_explainer.explanations.explainer import (
-    HARMONY_V4_MODELS,
-    ContextSize,
-    TokenActivationPairExplainer,
-)
-from neuron_explainer.explanations.explanations import ScoredSimulation
-from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
-from neuron_explainer.explanations.prompt_builder import PromptFormat
-from neuron_explainer.explanations.scoring import (
-    _simulate_and_score_sequence,
-    aggregate_scored_sequence_simulations,
-)
-from neuron_explainer.explanations.simulator import (
-    LogprobFreeExplanationTokenSimulator,
-    NeuronSimulator,
-)
 from tenacity import retry, stop_after_attempt, wait_random_exponential
 
 from sae_lens import SAE, logger
@@ -158,10 +137,22 @@ def sleep_identity(x: T) -> T:
 
 @retry(wait=wait_random_exponential(min=1, max=500), stop=stop_after_attempt(10))
 async def simulate_and_score(  # type: ignore
-    simulator: NeuronSimulator,
-    activation_records: list[ActivationRecord],
-) -> ScoredSimulation:
+    simulator: Any,
+    activation_records: list[Any],
+) -> Any:
     """Score an explanation of a neuron by how well it predicts activations on the given text sequences."""
+    try:
+        from neuron_explainer.explanations.scoring import (
+            _simulate_and_score_sequence,
+            aggregate_scored_sequence_simulations,
+        )
+    except ImportError as e:
+        raise ImportError(
+            "The neuron_explainer package is required to use this function. "
+            "Please install SAELens with the neuronpedia optional dependencies: "
+            "pip install sae-lens[neuronpedia]"
+        ) from e
+
     scored_sequence_simulations = await asyncio.gather(
         *[
             sleep_identity(
@@ -253,6 +244,31 @@ async def autointerp_neuronpedia_features(  # noqa: C901
     Returns:
         None
     """
+    try:
+        from neuron_explainer.activations.activation_records import (
+            calculate_max_activation,
+        )
+        from neuron_explainer.activations.activations import ActivationRecord
+        from neuron_explainer.explanations.calibrated_simulator import (
+            UncalibratedNeuronSimulator,
+        )
+        from neuron_explainer.explanations.explainer import (
+            HARMONY_V4_MODELS,
+            ContextSize,
+            TokenActivationPairExplainer,
+        )
+        from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
+        from neuron_explainer.explanations.prompt_builder import PromptFormat
+        from neuron_explainer.explanations.simulator import (
+            LogprobFreeExplanationTokenSimulator,
+        )
+    except ImportError as e:
+        raise ImportError(
+            "The automated-interpretability package is required to use autointerp functionality. "
+            "Please install SAELens with the neuronpedia optional dependencies: "
+            "pip install sae-lens[neuronpedia]"
+        ) from e
+
     logger.info("\n\n")
 
     if os.getenv("OPENAI_API_KEY") is None:
```
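The integration module now imports `neuron_explainer` lazily inside the functions that need it and raises an actionable `ImportError` when the extra is missing. A standalone sketch of that pattern; the helper name `require_neuron_explainer` is hypothetical, sae-lens inlines this logic instead:

```python
# Standalone sketch of the lazy-import pattern used in the diff above.
# The helper name is hypothetical and not part of sae-lens.
def require_neuron_explainer():
    try:
        import neuron_explainer  # shipped by automated-interpretability
    except ImportError as e:
        raise ImportError(
            "The automated-interpretability package is required for this feature. "
            'Install it with: pip install "sae-lens[neuronpedia]"'
        ) from e
    return neuron_explainer
```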
```diff
--- sae_lens-6.6.2/sae_lens/loading/pretrained_sae_loaders.py
+++ sae_lens-6.6.4/sae_lens/loading/pretrained_sae_loaders.py
@@ -1254,11 +1254,22 @@ def get_mwhanna_transcoder_config_from_hf(
     wandb_config_path = hf_hub_download(
         repo_id, "wandb-config.yaml", force_download=force_download
     )
-    base_config_path = hf_hub_download(
-        repo_id, "config.yaml", force_download=force_download
-    )
-    with open(base_config_path) as f:
-        base_cfg_info: dict[str, Any] = yaml.safe_load(f)
+    try:
+        base_config_path = hf_hub_download(
+            repo_id, "config.yaml", force_download=force_download
+        )
+        with open(base_config_path) as f:
+            base_cfg_info: dict[str, Any] = yaml.safe_load(f)
+    except EntryNotFoundError:
+        # the 14b transcoders don't have a config file for some reason, so just pull the model name from the repo_id
+        qwen_3_size_match = re.search(r"qwen3-(\d+(?:\.\d+)?)b", repo_id)
+        if not qwen_3_size_match:
+            raise ValueError(f"Could not extract Qwen3 size from repo_id: {repo_id}")
+        qwen_3_size = qwen_3_size_match.group(1)
+        base_cfg_info = {
+            "model_name": f"Qwen/Qwen3-{qwen_3_size}B",
+        }
+
     with open(wandb_config_path) as f:
         wandb_cfg_info: dict[str, Any] = yaml.safe_load(f)
 
```
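When `config.yaml` is missing from a transcoder repo, the loader now falls back to parsing the Qwen3 model size out of the repo id. A quick check of that fallback regex; the repo ids below are hypothetical examples, not actual Hugging Face repos:

```python
# Quick check of the fallback regex from the diff above.
import re

for repo_id in ["example/qwen3-14b-transcoders", "example/qwen3-1.7b-transcoders"]:
    match = re.search(r"qwen3-(\d+(?:\.\d+)?)b", repo_id)
    assert match is not None
    print({"model_name": f"Qwen/Qwen3-{match.group(1)}B"})
# -> {'model_name': 'Qwen/Qwen3-14B'}
# -> {'model_name': 'Qwen/Qwen3-1.7B'}
```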
|