sae-lens 6.15.0__py3-none-any.whl → 6.24.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sae_lens/__init__.py +13 -1
- sae_lens/analysis/hooked_sae_transformer.py +4 -13
- sae_lens/cache_activations_runner.py +3 -4
- sae_lens/config.py +39 -2
- sae_lens/constants.py +1 -0
- sae_lens/llm_sae_training_runner.py +9 -4
- sae_lens/loading/pretrained_sae_loaders.py +430 -24
- sae_lens/loading/pretrained_saes_directory.py +5 -3
- sae_lens/pretokenize_runner.py +3 -3
- sae_lens/pretrained_saes.yaml +26977 -65
- sae_lens/saes/__init__.py +7 -0
- sae_lens/saes/batchtopk_sae.py +3 -1
- sae_lens/saes/gated_sae.py +6 -11
- sae_lens/saes/jumprelu_sae.py +8 -13
- sae_lens/saes/matryoshka_batchtopk_sae.py +8 -15
- sae_lens/saes/sae.py +20 -32
- sae_lens/saes/standard_sae.py +4 -9
- sae_lens/saes/temporal_sae.py +365 -0
- sae_lens/saes/topk_sae.py +8 -11
- sae_lens/saes/transcoder.py +41 -0
- sae_lens/training/activation_scaler.py +7 -0
- sae_lens/training/activations_store.py +54 -12
- sae_lens/training/optim.py +11 -0
- sae_lens/training/sae_trainer.py +50 -11
- {sae_lens-6.15.0.dist-info → sae_lens-6.24.1.dist-info}/METADATA +16 -16
- sae_lens-6.24.1.dist-info/RECORD +41 -0
- sae_lens-6.15.0.dist-info/RECORD +0 -40
- {sae_lens-6.15.0.dist-info → sae_lens-6.24.1.dist-info}/WHEEL +0 -0
- {sae_lens-6.15.0.dist-info → sae_lens-6.24.1.dist-info}/licenses/LICENSE +0 -0
sae_lens/saes/__init__.py
CHANGED

@@ -25,6 +25,7 @@ from .standard_sae import (
     StandardTrainingSAE,
     StandardTrainingSAEConfig,
 )
+from .temporal_sae import TemporalSAE, TemporalSAEConfig
 from .topk_sae import (
     TopKSAE,
     TopKSAEConfig,
@@ -32,6 +33,8 @@ from .topk_sae import (
     TopKTrainingSAEConfig,
 )
 from .transcoder import (
+    JumpReLUSkipTranscoder,
+    JumpReLUSkipTranscoderConfig,
     JumpReLUTranscoder,
     JumpReLUTranscoderConfig,
     SkipTranscoder,
@@ -69,6 +72,10 @@ __all__ = [
     "SkipTranscoderConfig",
     "JumpReLUTranscoder",
     "JumpReLUTranscoderConfig",
+    "JumpReLUSkipTranscoder",
+    "JumpReLUSkipTranscoderConfig",
     "MatryoshkaBatchTopKTrainingSAE",
     "MatryoshkaBatchTopKTrainingSAEConfig",
+    "TemporalSAE",
+    "TemporalSAEConfig",
 ]
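The new classes are re-exported from `sae_lens.saes`; a minimal import sketch (only the names are taken from the diff above):

```python
from sae_lens.saes import (
    JumpReLUSkipTranscoder,
    JumpReLUSkipTranscoderConfig,
    TemporalSAE,
    TemporalSAEConfig,
)
```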
sae_lens/saes/batchtopk_sae.py
CHANGED

@@ -23,7 +23,9 @@ class BatchTopK(nn.Module):
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         acts = x.relu()
         flat_acts = acts.flatten()
-
+        # Calculate total number of samples across all non-feature dimensions
+        num_samples = acts.shape[:-1].numel()
+        acts_topk_flat = torch.topk(flat_acts, int(self.k * num_samples), dim=-1)
         return (
             torch.zeros_like(flat_acts)
             .scatter(-1, acts_topk_flat.indices, acts_topk_flat.values)
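The new count multiplies `k` by every non-feature position rather than only the batch dimension, so inputs shaped `(batch, seq, d_sae)` keep `k` active features per position on average. A standalone sketch of the selection logic (illustrative only, not the library class):

```python
import torch

def batch_topk(acts: torch.Tensor, k: int) -> torch.Tensor:
    """Keep the k * num_positions largest activations across the whole batch."""
    flat_acts = acts.flatten()
    # count every non-feature position, not just dim 0, so (batch, seq, d_sae)
    # inputs are handled correctly
    num_samples = acts.shape[:-1].numel()
    topk = torch.topk(flat_acts, int(k * num_samples), dim=-1)
    return (
        torch.zeros_like(flat_acts)
        .scatter(-1, topk.indices, topk.values)
        .reshape(acts.shape)
    )

x = torch.randn(4, 8, 128).relu()       # (batch, seq, d_sae)
out = batch_topk(x, k=3)
assert (out != 0).sum() <= 3 * 4 * 8    # at most k activations per position on average
```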
sae_lens/saes/gated_sae.py
CHANGED

@@ -2,7 +2,6 @@ from dataclasses import dataclass
 from typing import Any
 
 import torch
-from jaxtyping import Float
 from numpy.typing import NDArray
 from torch import nn
 from typing_extensions import override
@@ -49,9 +48,7 @@ class GatedSAE(SAE[GatedSAEConfig]):
         super().initialize_weights()
         _init_weights_gated(self)
 
-    def encode(
-        self, x: Float[torch.Tensor, "... d_in"]
-    ) -> Float[torch.Tensor, "... d_sae"]:
+    def encode(self, x: torch.Tensor) -> torch.Tensor:
         """
         Encode the input tensor into the feature space using a gated encoder.
         This must match the original encode_gated implementation from SAE class.
@@ -72,9 +69,7 @@ class GatedSAE(SAE[GatedSAEConfig]):
         # Combine gating and magnitudes
         return self.hook_sae_acts_post(active_features * feature_magnitudes)
 
-    def decode(
-        self, feature_acts: Float[torch.Tensor, "... d_sae"]
-    ) -> Float[torch.Tensor, "... d_in"]:
+    def decode(self, feature_acts: torch.Tensor) -> torch.Tensor:
         """
         Decode the feature activations back into the input space:
         1) Apply optional finetuning scaling.
@@ -94,7 +89,7 @@ class GatedSAE(SAE[GatedSAEConfig]):
     @torch.no_grad()
     def fold_W_dec_norm(self):
         """Override to handle gated-specific parameters."""
-        W_dec_norms = self.W_dec.norm(dim=-1).unsqueeze(1)
+        W_dec_norms = self.W_dec.norm(dim=-1).clamp(min=1e-8).unsqueeze(1)
         self.W_dec.data = self.W_dec.data / W_dec_norms
         self.W_enc.data = self.W_enc.data * W_dec_norms.T
 
@@ -147,8 +142,8 @@ class GatedTrainingSAE(TrainingSAE[GatedTrainingSAEConfig]):
         _init_weights_gated(self)
 
     def encode_with_hidden_pre(
-        self, x:
-    ) -> tuple[
+        self, x: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         """
         Gated forward pass with pre-activation (for training).
         """
@@ -222,7 +217,7 @@ class GatedTrainingSAE(TrainingSAE[GatedTrainingSAEConfig]):
     @torch.no_grad()
     def fold_W_dec_norm(self):
         """Override to handle gated-specific parameters."""
-        W_dec_norms = self.W_dec.norm(dim=-1).unsqueeze(1)
+        W_dec_norms = self.W_dec.norm(dim=-1).clamp(min=1e-8).unsqueeze(1)
         self.W_dec.data = self.W_dec.data / W_dec_norms
         self.W_enc.data = self.W_enc.data * W_dec_norms.T
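The repeated `.clamp(min=1e-8)` change guards `fold_W_dec_norm` against all-zero decoder rows (dead latents), which previously turned into NaN/inf when dividing by the norm. A toy illustration with made-up shapes, not library code:

```python
import torch

W_dec = torch.zeros(4, 16)                 # row 0 is a dead latent: an all-zero decoder row
W_dec[1:] = torch.randn(3, 16)
W_enc = torch.randn(16, 4)

# unclamped norms: dividing by a zero norm produces NaN
norms = W_dec.norm(dim=-1).unsqueeze(1)
print(torch.isnan(W_dec / norms).any())    # tensor(True)

# clamped norms, as in the updated fold_W_dec_norm
safe_norms = W_dec.norm(dim=-1).clamp(min=1e-8).unsqueeze(1)
W_dec_folded = W_dec / safe_norms
W_enc_folded = W_enc * safe_norms.T
print(torch.isnan(W_dec_folded).any())     # tensor(False)
```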
sae_lens/saes/jumprelu_sae.py
CHANGED

@@ -3,7 +3,6 @@ from typing import Any, Literal
 
 import numpy as np
 import torch
-from jaxtyping import Float
 from torch import nn
 from typing_extensions import override
 
@@ -130,9 +129,7 @@ class JumpReLUSAE(SAE[JumpReLUSAEConfig]):
             torch.zeros(self.cfg.d_sae, dtype=self.dtype, device=self.device)
         )
 
-    def encode(
-        self, x: Float[torch.Tensor, "... d_in"]
-    ) -> Float[torch.Tensor, "... d_sae"]:
+    def encode(self, x: torch.Tensor) -> torch.Tensor:
         """
         Encode the input tensor into the feature space using JumpReLU.
         The threshold parameter determines which units remain active.
@@ -150,9 +147,7 @@ class JumpReLUSAE(SAE[JumpReLUSAEConfig]):
         # 3) Multiply the normally activated units by that mask.
         return self.hook_sae_acts_post(base_acts * jump_relu_mask)
 
-    def decode(
-        self, feature_acts: Float[torch.Tensor, "... d_sae"]
-    ) -> Float[torch.Tensor, "... d_in"]:
+    def decode(self, feature_acts: torch.Tensor) -> torch.Tensor:
         """
         Decode the feature activations back to the input space.
         Follows the same steps as StandardSAE: apply scaling, transform, hook, and optionally reshape.
@@ -172,8 +167,8 @@ class JumpReLUSAE(SAE[JumpReLUSAEConfig]):
         # Save the current threshold before calling parent method
         current_thresh = self.threshold.clone()
 
-        # Get W_dec norms that will be used for scaling
-        W_dec_norms = self.W_dec.norm(dim=-1)
+        # Get W_dec norms that will be used for scaling (clamped to avoid division by zero)
+        W_dec_norms = self.W_dec.norm(dim=-1).clamp(min=1e-8)
 
         # Call parent implementation to handle W_enc, W_dec, and b_enc adjustment
         super().fold_W_dec_norm()
@@ -265,8 +260,8 @@ class JumpReLUTrainingSAE(TrainingSAE[JumpReLUTrainingSAEConfig]):
         return torch.exp(self.log_threshold)
 
     def encode_with_hidden_pre(
-        self, x:
-    ) -> tuple[
+        self, x: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         sae_in = self.process_sae_in(x)
 
         hidden_pre = sae_in @ self.W_enc + self.b_enc
@@ -330,8 +325,8 @@ class JumpReLUTrainingSAE(TrainingSAE[JumpReLUTrainingSAEConfig]):
         # Save the current threshold before we call the parent method
         current_thresh = self.threshold.clone()
 
-        # Get W_dec norms
-        W_dec_norms = self.W_dec.norm(dim=-1).unsqueeze(1)
+        # Get W_dec norms (clamped to avoid division by zero)
+        W_dec_norms = self.W_dec.norm(dim=-1).clamp(min=1e-8).unsqueeze(1)
 
         # Call parent implementation to handle W_enc and W_dec adjustment
         super().fold_W_dec_norm()
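For context on the encode path these hunks touch: JumpReLU keeps a unit only when its pre-activation clears a learned per-unit threshold. A toy sketch with made-up numbers (the library version also runs the SAE hooks):

```python
import torch

hidden_pre = torch.tensor([[-0.2, 0.1, 0.6, 1.3]])   # pre-activations for 4 latents
threshold = torch.tensor([0.0, 0.5, 0.5, 0.5])        # learned per-unit thresholds

base_acts = torch.relu(hidden_pre)
jump_relu_mask = (hidden_pre > threshold).to(base_acts.dtype)
feature_acts = base_acts * jump_relu_mask
print(feature_acts)   # tensor([[0.0000, 0.0000, 0.6000, 1.3000]])
```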
sae_lens/saes/matryoshka_batchtopk_sae.py
CHANGED

@@ -2,7 +2,6 @@ import warnings
 from dataclasses import dataclass, field
 
 import torch
-from jaxtyping import Float
 from typing_extensions import override
 
 from sae_lens.saes.batchtopk_sae import (
@@ -78,14 +77,11 @@ class MatryoshkaBatchTopKTrainingSAE(BatchTopKTrainingSAE):
     @override
     def training_forward_pass(self, step_input: TrainStepInput) -> TrainStepOutput:
         base_output = super().training_forward_pass(step_input)
-        hidden_pre = base_output.hidden_pre
         inv_W_dec_norm = 1 / self.W_dec.norm(dim=-1)
         # the outer matryoshka level is the base SAE, so we don't need to add an extra loss for it
         for width in self.cfg.matryoshka_widths[:-1]:
-            inner_hidden_pre = hidden_pre[:, :width]
-            inner_feat_acts = self.activation_fn(inner_hidden_pre)
             inner_reconstruction = self._decode_matryoshka_level(
-
+                base_output.feature_acts, width, inv_W_dec_norm
             )
             inner_mse_loss = (
                 self.mse_loss_fn(inner_reconstruction, step_input.sae_in)
@@ -98,23 +94,24 @@ class MatryoshkaBatchTopKTrainingSAE(BatchTopKTrainingSAE):
 
     def _decode_matryoshka_level(
         self,
-        feature_acts:
+        feature_acts: torch.Tensor,
         width: int,
         inv_W_dec_norm: torch.Tensor,
-    ) ->
+    ) -> torch.Tensor:
         """
         Decodes feature activations back into input space for a matryoshka level
         """
+        inner_feature_acts = feature_acts[:, :width]
         # Handle sparse tensors using efficient sparse matrix multiplication
         if self.cfg.rescale_acts_by_decoder_norm:
             # need to multiply by the inverse of the norm because division is illegal with sparse tensors
-
-        if
+            inner_feature_acts = inner_feature_acts * inv_W_dec_norm[:width]
+        if inner_feature_acts.is_sparse:
             sae_out_pre = (
-                _sparse_matmul_nd(
+                _sparse_matmul_nd(inner_feature_acts, self.W_dec[:width]) + self.b_dec
             )
         else:
-            sae_out_pre =
+            sae_out_pre = inner_feature_acts @ self.W_dec[:width] + self.b_dec
         sae_out_pre = self.run_time_activation_norm_fn_out(sae_out_pre)
         return self.reshape_fn_out(sae_out_pre, self.d_head)
 
@@ -137,7 +134,3 @@ def _validate_matryoshka_config(cfg: MatryoshkaBatchTopKTrainingSAEConfig) -> None:
         warnings.warn(
             "WARNING: You have only set one matryoshka level. This is equivalent to using a standard BatchTopK SAE and is likely not what you want."
         )
-    if cfg.matryoshka_widths[0] < cfg.k:
-        raise ValueError(
-            "The smallest matryoshka level width cannot be smaller than cfg.k."
-        )
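The reworked `_decode_matryoshka_level` slices both the feature activations and the decoder down to the level width before reconstructing. A minimal dense-tensor sketch (illustrative shapes; the real method also handles sparse activations, decoder-norm rescaling, and output normalization):

```python
import torch

d_sae, d_in, width = 64, 16, 32
feature_acts = torch.randn(8, d_sae).relu()   # (batch, d_sae)
W_dec = torch.randn(d_sae, d_in)
b_dec = torch.zeros(d_in)

# only the first `width` latents and decoder rows participate in this level
inner_feature_acts = feature_acts[:, :width]
inner_reconstruction = inner_feature_acts @ W_dec[:width] + b_dec
print(inner_reconstruction.shape)             # torch.Size([8, 16])
```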
sae_lens/saes/sae.py
CHANGED

@@ -19,9 +19,8 @@ from typing import (
 
 import einops
 import torch
-from jaxtyping import Float
 from numpy.typing import NDArray
-from safetensors.torch import save_file
+from safetensors.torch import load_file, save_file
 from torch import nn
 from transformer_lens.hook_points import HookedRootModule, HookPoint
 from typing_extensions import deprecated, overload, override
@@ -155,9 +154,9 @@ class SAEConfig(ABC):
     dtype: str = "float32"
     device: str = "cpu"
     apply_b_dec_to_input: bool = True
-    normalize_activations: Literal[
-        "none",
-
+    normalize_activations: Literal["none", "expected_average_only_in", "layer_norm"] = (
+        "none"  # none, expected_average_only_in (Anthropic April Update)
+    )
     reshape_activations: Literal["none", "hook_z"] = "none"
     metadata: SAEMetadata = field(default_factory=SAEMetadata)
 
@@ -217,6 +216,7 @@ class TrainStepInput:
     sae_in: torch.Tensor
     coefficients: dict[str, float]
    dead_neuron_mask: torch.Tensor | None
+    n_training_steps: int
 
 
 class TrainCoefficientConfig(NamedTuple):
@@ -308,6 +308,7 @@ class SAE(HookedRootModule, Generic[T_SAE_CONFIG], ABC):
 
             self.run_time_activation_norm_fn_in = run_time_activation_norm_fn_in
             self.run_time_activation_norm_fn_out = run_time_activation_norm_fn_out
+
         elif self.cfg.normalize_activations == "layer_norm":
             # we need to scale the norm of the input and store the scaling factor
             def run_time_activation_ln_in(
@@ -349,16 +350,12 @@ class SAE(HookedRootModule, Generic[T_SAE_CONFIG], ABC):
         self.W_enc = nn.Parameter(w_enc_data)
 
     @abstractmethod
-    def encode(
-        self, x: Float[torch.Tensor, "... d_in"]
-    ) -> Float[torch.Tensor, "... d_sae"]:
+    def encode(self, x: torch.Tensor) -> torch.Tensor:
         """Encode input tensor to feature space."""
         pass
 
     @abstractmethod
-    def decode(
-        self, feature_acts: Float[torch.Tensor, "... d_sae"]
-    ) -> Float[torch.Tensor, "... d_in"]:
+    def decode(self, feature_acts: torch.Tensor) -> torch.Tensor:
         """Decode feature activations back to input space."""
         pass
 
@@ -448,26 +445,15 @@ class SAE(HookedRootModule, Generic[T_SAE_CONFIG], ABC):
 
         return super().to(*args, **kwargs)
 
-    def process_sae_in(
-        self, sae_in: Float[torch.Tensor, "... d_in"]
-    ) -> Float[torch.Tensor, "... d_in"]:
-        # print(f"Input shape to process_sae_in: {sae_in.shape}")
-        # print(f"self.cfg.hook_name: {self.cfg.hook_name}")
-        # print(f"self.b_dec shape: {self.b_dec.shape}")
-        # print(f"Hook z reshaping mode: {getattr(self, 'hook_z_reshaping_mode', False)}")
-
+    def process_sae_in(self, sae_in: torch.Tensor) -> torch.Tensor:
         sae_in = sae_in.to(self.dtype)
-
-        # print(f"Shape before reshape_fn_in: {sae_in.shape}")
         sae_in = self.reshape_fn_in(sae_in)
-        # print(f"Shape after reshape_fn_in: {sae_in.shape}")
 
         sae_in = self.hook_sae_input(sae_in)
         sae_in = self.run_time_activation_norm_fn_in(sae_in)
 
         # Here's where the error happens
         bias_term = self.b_dec * self.cfg.apply_b_dec_to_input
-        # print(f"Bias term shape: {bias_term.shape}")
 
         return sae_in - bias_term
 
@@ -498,7 +484,7 @@ class SAE(HookedRootModule, Generic[T_SAE_CONFIG], ABC):
     @torch.no_grad()
     def fold_W_dec_norm(self):
         """Fold decoder norms into encoder."""
-        W_dec_norms = self.W_dec.norm(dim=-1).unsqueeze(1)
+        W_dec_norms = self.W_dec.norm(dim=-1).clamp(min=1e-8).unsqueeze(1)
         self.W_dec.data = self.W_dec.data / W_dec_norms
         self.W_enc.data = self.W_enc.data * W_dec_norms.T
 
@@ -866,14 +852,12 @@ class TrainingSAE(SAE[T_TRAINING_SAE_CONFIG], ABC):
 
     @abstractmethod
     def encode_with_hidden_pre(
-        self, x:
-    ) -> tuple[
+        self, x: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         """Encode with access to pre-activation values for training."""
         ...
 
-    def encode(
-        self, x: Float[torch.Tensor, "... d_in"]
-    ) -> Float[torch.Tensor, "... d_sae"]:
+    def encode(self, x: torch.Tensor) -> torch.Tensor:
         """
         For inference, just encode without returning hidden_pre.
         (training_forward_pass calls encode_with_hidden_pre).
@@ -881,9 +865,7 @@ class TrainingSAE(SAE[T_TRAINING_SAE_CONFIG], ABC):
         feature_acts, _ = self.encode_with_hidden_pre(x)
         return feature_acts
 
-    def decode(
-        self, feature_acts: Float[torch.Tensor, "... d_sae"]
-    ) -> Float[torch.Tensor, "... d_in"]:
+    def decode(self, feature_acts: torch.Tensor) -> torch.Tensor:
         """
         Decodes feature activations back into input space,
         applying optional finetuning scale, hooking, out normalization, etc.
@@ -1017,6 +999,12 @@ class TrainingSAE(SAE[T_TRAINING_SAE_CONFIG], ABC):
     ) -> type[TrainingSAEConfig]:
         return get_sae_training_class(architecture)[1]
 
+    def load_weights_from_checkpoint(self, checkpoint_path: Path | str) -> None:
+        checkpoint_path = Path(checkpoint_path)
+        state_dict = load_file(checkpoint_path / SAE_WEIGHTS_FILENAME)
+        self.process_state_dict_for_loading(state_dict)
+        self.load_state_dict(state_dict)
+
 
 _blank_hook = nn.Identity()
 
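The new `load_weights_from_checkpoint` helper reads the safetensors weights file out of a checkpoint directory and loads it via `process_state_dict_for_loading`. A hypothetical usage sketch (the config sizes and path are made up; any `TrainingSAE` subclass exposes the method):

```python
from sae_lens.saes import StandardTrainingSAE, StandardTrainingSAEConfig

# hypothetical dimensions for illustration
cfg = StandardTrainingSAEConfig(d_in=512, d_sae=4096)
sae = StandardTrainingSAE(cfg)

# directory written by a training checkpoint, containing the SAE weights
# safetensors file (SAE_WEIGHTS_FILENAME)
sae.load_weights_from_checkpoint("checkpoints/step_10000")
```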
sae_lens/saes/standard_sae.py
CHANGED

@@ -2,7 +2,6 @@ from dataclasses import dataclass
 
 import numpy as np
 import torch
-from jaxtyping import Float
 from numpy.typing import NDArray
 from torch import nn
 from typing_extensions import override
@@ -54,9 +53,7 @@ class StandardSAE(SAE[StandardSAEConfig]):
         super().initialize_weights()
         _init_weights_standard(self)
 
-    def encode(
-        self, x: Float[torch.Tensor, "... d_in"]
-    ) -> Float[torch.Tensor, "... d_sae"]:
+    def encode(self, x: torch.Tensor) -> torch.Tensor:
         """
         Encode the input tensor into the feature space.
         """
@@ -67,9 +64,7 @@ class StandardSAE(SAE[StandardSAEConfig]):
         # Apply the activation function (e.g., ReLU, depending on config)
         return self.hook_sae_acts_post(self.activation_fn(hidden_pre))
 
-    def decode(
-        self, feature_acts: Float[torch.Tensor, "... d_sae"]
-    ) -> Float[torch.Tensor, "... d_in"]:
+    def decode(self, feature_acts: torch.Tensor) -> torch.Tensor:
         """
         Decode the feature activations back to the input space.
         Now, if hook_z reshaping is turned on, we reverse the flattening.
@@ -127,8 +122,8 @@ class StandardTrainingSAE(TrainingSAE[StandardTrainingSAEConfig]):
     }
 
     def encode_with_hidden_pre(
-        self, x:
-    ) -> tuple[
+        self, x: torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         # Process the input (including dtype conversion, hook call, and any activation normalization)
         sae_in = self.process_sae_in(x)
         # Compute the pre-activation (and allow for a hook if desired)
|