sae-lens 6.6.3__tar.gz → 6.6.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sae_lens-6.6.3 → sae_lens-6.6.5}/PKG-INFO +3 -2
- {sae_lens-6.6.3 → sae_lens-6.6.5}/pyproject.toml +4 -2
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/__init__.py +1 -1
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/analysis/neuronpedia_integration.py +40 -24
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/evals.py +14 -10
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/loading/pretrained_sae_loaders.py +5 -1
- {sae_lens-6.6.3 → sae_lens-6.6.5}/LICENSE +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/README.md +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/analysis/__init__.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/analysis/hooked_sae_transformer.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/cache_activations_runner.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/config.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/constants.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/llm_sae_training_runner.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/load_model.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/loading/__init__.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/loading/pretrained_saes_directory.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/pretokenize_runner.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/pretrained_saes.yaml +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/registry.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/__init__.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/batchtopk_sae.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/gated_sae.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/jumprelu_sae.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/sae.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/standard_sae.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/topk_sae.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/saes/transcoder.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/tokenization_and_batching.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/__init__.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/activation_scaler.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/activations_store.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/mixing_buffer.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/optim.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/sae_trainer.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/types.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/training/upload_saes_to_huggingface.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/tutorial/tsea.py +0 -0
- {sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/util.py +0 -0
{sae_lens-6.6.3 → sae_lens-6.6.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: sae-lens
-Version: 6.6.3
+Version: 6.6.5
 Summary: Training and Analyzing Sparse Autoencoders (SAEs)
 License: MIT
 Keywords: deep-learning,sparse-autoencoders,mechanistic-interpretability,PyTorch
@@ -14,7 +14,8 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Provides-Extra: mamba
-Requires-Dist: automated-interpretability (>=0.0.5,<1.0.0)
+Provides-Extra: neuronpedia
+Requires-Dist: automated-interpretability (>=0.0.5,<1.0.0) ; extra == "neuronpedia"
 Requires-Dist: babe (>=0.0.7,<0.0.8)
 Requires-Dist: datasets (>=3.1.0)
 Requires-Dist: mamba-lens (>=0.0.4,<0.0.5) ; extra == "mamba"
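(The PKG-INFO changes above are generated: Poetry derives the `Provides-Extra: neuronpedia` line and the `; extra == "neuronpedia"` marker from the `pyproject.toml` edits shown next, where the dependency is flagged `optional = true` and mapped to an extra.)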
{sae_lens-6.6.3 → sae_lens-6.6.5}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sae-lens"
-version = "6.6.3"
+version = "6.6.5"
 description = "Training and Analyzing Sparse Autoencoders (SAEs)"
 authors = ["Joseph Bloom"]
 readme = "README.md"
@@ -28,12 +28,12 @@ babe = "^0.0.7"
 nltk = "^3.8.1"
 safetensors = ">=0.4.2,<1.0.0"
 mamba-lens = { version = "^0.0.4", optional = true }
-automated-interpretability = ">=0.0.5,<1.0.0"
 python-dotenv = ">=1.0.1"
 pyyaml = "^6.0.1"
 typing-extensions = "^4.10.0"
 simple-parsing = "^0.1.6"
 tenacity = ">=9.0.0"
+automated-interpretability = { version = ">=0.0.5,<1.0.0", optional = true }

 [tool.poetry.group.dev.dependencies]
 pytest = "^8.0.2"
@@ -56,9 +56,11 @@ ruff = "^0.7.4"
 eai-sparsify = "^1.1.1"
 mike = "^2.0.0"
 trio = "^0.30.0"
+automated-interpretability = "^0.0.13"

 [tool.poetry.extras]
 mamba = ["mamba-lens"]
+neuronpedia = ["automated-interpretability"]

 [tool.ruff.lint]
 exclude = ["*.ipynb"]
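With this release, a plain `pip install sae-lens` no longer pulls in `automated-interpretability`; the Neuronpedia autointerp features now require installing the extra, e.g. `pip install sae-lens[neuronpedia]`. The code-side counterpart, visible in the hunks below, is to defer the `neuron_explainer` imports until they are actually needed. A minimal sketch of that guard pattern, assuming only that the extra ships the `neuron_explainer` module (the helper name here is illustrative, not part of sae-lens):

def _require_neuron_explainer():
    # Deferred import: only fails when the optional feature is actually used.
    try:
        import neuron_explainer  # distributed on PyPI as "automated-interpretability"
    except ImportError as e:
        raise ImportError(
            "Missing optional dependency. Install with: pip install sae-lens[neuronpedia]"
        ) from e
    return neuron_explainer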
{sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/analysis/neuronpedia_integration.py

@@ -8,27 +8,6 @@ from typing import Any, TypeVar

 import requests
 from dotenv import load_dotenv
-from neuron_explainer.activations.activation_records import calculate_max_activation
-from neuron_explainer.activations.activations import ActivationRecord
-from neuron_explainer.explanations.calibrated_simulator import (
-    UncalibratedNeuronSimulator,
-)
-from neuron_explainer.explanations.explainer import (
-    HARMONY_V4_MODELS,
-    ContextSize,
-    TokenActivationPairExplainer,
-)
-from neuron_explainer.explanations.explanations import ScoredSimulation
-from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
-from neuron_explainer.explanations.prompt_builder import PromptFormat
-from neuron_explainer.explanations.scoring import (
-    _simulate_and_score_sequence,
-    aggregate_scored_sequence_simulations,
-)
-from neuron_explainer.explanations.simulator import (
-    LogprobFreeExplanationTokenSimulator,
-    NeuronSimulator,
-)
 from tenacity import retry, stop_after_attempt, wait_random_exponential

 from sae_lens import SAE, logger
@@ -158,10 +137,22 @@ def sleep_identity(x: T) -> T:

 @retry(wait=wait_random_exponential(min=1, max=500), stop=stop_after_attempt(10))
 async def simulate_and_score(  # type: ignore
-    simulator: NeuronSimulator,
-    activation_records: list[ActivationRecord],
-) -> ScoredSimulation:
+    simulator: Any,
+    activation_records: list[Any],
+) -> Any:
     """Score an explanation of a neuron by how well it predicts activations on the given text sequences."""
+    try:
+        from neuron_explainer.explanations.scoring import (
+            _simulate_and_score_sequence,
+            aggregate_scored_sequence_simulations,
+        )
+    except ImportError as e:
+        raise ImportError(
+            "The neuron_explainer package is required to use this function. "
+            "Please install SAELens with the neuronpedia optional dependencies: "
+            "pip install sae-lens[neuronpedia]"
+        ) from e
+
     scored_sequence_simulations = await asyncio.gather(
         *[
             sleep_identity(
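Because the `neuron_explainer` types can no longer be imported at module load time, the signature above is widened to `Any`. One common alternative that keeps precise annotations for static checkers without a runtime import is a `typing.TYPE_CHECKING` guard; a sketch of that pattern (not what this release does):

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Only evaluated by type checkers, never at runtime.
    from neuron_explainer.activations.activations import ActivationRecord
    from neuron_explainer.explanations.explanations import ScoredSimulation
    from neuron_explainer.explanations.simulator import NeuronSimulator

async def simulate_and_score(
    simulator: "NeuronSimulator",
    activation_records: "list[ActivationRecord]",
) -> "ScoredSimulation":
    ...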
{sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/analysis/neuronpedia_integration.py (continued)

@@ -253,6 +244,31 @@ async def autointerp_neuronpedia_features(  # noqa: C901
     Returns:
         None
     """
+    try:
+        from neuron_explainer.activations.activation_records import (
+            calculate_max_activation,
+        )
+        from neuron_explainer.activations.activations import ActivationRecord
+        from neuron_explainer.explanations.calibrated_simulator import (
+            UncalibratedNeuronSimulator,
+        )
+        from neuron_explainer.explanations.explainer import (
+            HARMONY_V4_MODELS,
+            ContextSize,
+            TokenActivationPairExplainer,
+        )
+        from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
+        from neuron_explainer.explanations.prompt_builder import PromptFormat
+        from neuron_explainer.explanations.simulator import (
+            LogprobFreeExplanationTokenSimulator,
+        )
+    except ImportError as e:
+        raise ImportError(
+            "The automated-interpretability package is required to use autointerp functionality. "
+            "Please install SAELens with the neuronpedia optional dependencies: "
+            "pip install sae-lens[neuronpedia]"
+        ) from e
+
     logger.info("\n\n")

     if os.getenv("OPENAI_API_KEY") is None:
{sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/evals.py

@@ -718,17 +718,9 @@ def get_recons_loss(
         **model_kwargs,
     )

-    def kl(original_logits: torch.Tensor, new_logits: torch.Tensor):
-        original_probs = torch.nn.functional.softmax(original_logits, dim=-1)
-        log_original_probs = torch.log(original_probs)
-        new_probs = torch.nn.functional.softmax(new_logits, dim=-1)
-        log_new_probs = torch.log(new_probs)
-        kl_div = original_probs * (log_original_probs - log_new_probs)
-        return kl_div.sum(dim=-1)
-
     if compute_kl:
-        recons_kl_div = kl(original_logits, recons_logits)
-        zero_abl_kl_div = kl(original_logits, zero_abl_logits)
+        recons_kl_div = _kl(original_logits, recons_logits)
+        zero_abl_kl_div = _kl(original_logits, zero_abl_logits)
         metrics["kl_div_with_sae"] = recons_kl_div
         metrics["kl_div_with_ablation"] = zero_abl_kl_div
@@ -740,6 +732,18 @@ def get_recons_loss(
     return metrics


+def _kl(original_logits: torch.Tensor, new_logits: torch.Tensor):
+    # Computes the log-probabilities of the new logits (approximation).
+    log_probs_new = torch.nn.functional.log_softmax(new_logits, dim=-1)
+    # Computes the probabilities of the original logits (true distribution).
+    probs_orig = torch.nn.functional.softmax(original_logits, dim=-1)
+    # Compute the KL divergence. torch.nn.functional.kl_div expects the first argument to be the log
+    # probabilities of the approximation (new), and the second argument to be the true distribution
+    # (original) as probabilities. This computes KL(original || new).
+    kl = torch.nn.functional.kl_div(log_probs_new, probs_orig, reduction="none")
+    return kl.sum(dim=-1)
+
+
 def all_loadable_saes() -> list[tuple[str, str, float, float]]:
     all_loadable_saes = []
     saes_directory = get_pretrained_saes_directory()
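The new module-level `_kl` computes the same quantity as the deleted inner `kl` closure: with p = softmax(original_logits) and q = softmax(new_logits), both return KL(p ‖ q) = Σ p · (log p − log q) summed over the vocabulary dimension, but `log_softmax` plus `F.kl_div` avoids the numerically unstable `log(softmax(x))` round trip. A quick self-contained equivalence check (made-up tensor shapes, not from the test suite):

import torch
import torch.nn.functional as F

original_logits = torch.randn(2, 4, 10)  # (batch, seq, vocab), arbitrary
new_logits = torch.randn(2, 4, 10)

# Old closure: explicit p * (log p - log q), summed over the vocab dim.
p = F.softmax(original_logits, dim=-1)
q = F.softmax(new_logits, dim=-1)
manual = (p * (p.log() - q.log())).sum(dim=-1)

# New helper: F.kl_div(log_q, p) computes p * (log p - log q) elementwise.
log_q = F.log_softmax(new_logits, dim=-1)
via_kl_div = F.kl_div(log_q, p, reduction="none").sum(dim=-1)

assert torch.allclose(manual, via_kl_div, atol=1e-6)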
{sae_lens-6.6.3 → sae_lens-6.6.5}/sae_lens/loading/pretrained_sae_loaders.py

@@ -1001,10 +1001,14 @@ def get_sparsify_config_from_disk(
     layer = int(match.group(1))
     hook_name = f"blocks.{layer}.hook_resid_post"

+    d_sae = old_cfg_dict.get("num_latents")
+    if d_sae is None:
+        d_sae = old_cfg_dict["d_in"] * old_cfg_dict["expansion_factor"]
+
     cfg_dict: dict[str, Any] = {
         "architecture": "standard",
         "d_in": old_cfg_dict["d_in"],
-        "d_sae": old_cfg_dict["num_latents"],
+        "d_sae": d_sae,
         "dtype": "bfloat16",
         "device": device or "cpu",
         "model_name": config_dict.get("model", path.parts[-2]),
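The `d_sae` fallback in the last hunk handles sparsify checkpoints whose config omits `num_latents`, deriving the latent width as `d_in * expansion_factor` instead. A toy illustration with made-up config values:

# Made-up sparsify-style configs illustrating the fallback.
explicit = {"d_in": 768, "expansion_factor": 32, "num_latents": 24576}
implicit = {"d_in": 768, "expansion_factor": 32}  # num_latents absent

for old_cfg_dict in (explicit, implicit):
    d_sae = old_cfg_dict.get("num_latents")
    if d_sae is None:
        d_sae = old_cfg_dict["d_in"] * old_cfg_dict["expansion_factor"]
    print(d_sae)  # 24576 both times: 768 * 32 = 24576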