sae-lens 6.7.0__tar.gz → 6.9.0__tar.gz
This diff shows the changes between publicly released versions of the sae-lens package (6.7.0 → 6.9.0) as they appear in their public registry. It is provided for informational purposes only.
- {sae_lens-6.7.0 → sae_lens-6.9.0}/PKG-INFO +1 -3
- {sae_lens-6.7.0 → sae_lens-6.9.0}/pyproject.toml +1 -4
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/__init__.py +1 -1
- sae_lens-6.9.0/sae_lens/analysis/neuronpedia_integration.py +163 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/pretrained_saes.yaml +85 -1
- sae_lens-6.7.0/sae_lens/analysis/neuronpedia_integration.py +0 -510
- {sae_lens-6.7.0 → sae_lens-6.9.0}/LICENSE +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/README.md +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/analysis/__init__.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/analysis/hooked_sae_transformer.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/cache_activations_runner.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/config.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/constants.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/evals.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/llm_sae_training_runner.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/load_model.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/loading/__init__.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/loading/pretrained_sae_loaders.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/loading/pretrained_saes_directory.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/pretokenize_runner.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/registry.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/__init__.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/batchtopk_sae.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/gated_sae.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/jumprelu_sae.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/sae.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/standard_sae.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/topk_sae.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/saes/transcoder.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/tokenization_and_batching.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/__init__.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/activation_scaler.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/activations_store.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/mixing_buffer.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/optim.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/sae_trainer.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/types.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/training/upload_saes_to_huggingface.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/tutorial/tsea.py +0 -0
- {sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/util.py +0 -0
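Taken together: the version metadata is bumped from 6.7.0 to 6.9.0; the optional neuronpedia extra and its automated-interpretability dependency are dropped from the packaging metadata; sae_lens/analysis/neuronpedia_integration.py is rewritten from 510 lines down to 163, removing the async autointerp explanation-and-scoring pipeline while keeping the lightweight Neuronpedia URL and API helpers; and pretrained_saes.yaml gains a new release entry with per-layer SAEs for the mntss/clt-gemma-2-2b-2.5M transcoders.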
{sae_lens-6.7.0 → sae_lens-6.9.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: sae-lens
-Version: 6.7.0
+Version: 6.9.0
 Summary: Training and Analyzing Sparse Autoencoders (SAEs)
 License: MIT
 Keywords: deep-learning,sparse-autoencoders,mechanistic-interpretability,PyTorch
@@ -14,8 +14,6 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Provides-Extra: mamba
-Provides-Extra: neuronpedia
-Requires-Dist: automated-interpretability (>=0.0.5,<1.0.0) ; extra == "neuronpedia"
 Requires-Dist: babe (>=0.0.7,<0.0.8)
 Requires-Dist: datasets (>=3.1.0)
 Requires-Dist: mamba-lens (>=0.0.4,<0.0.5) ; extra == "mamba"
{sae_lens-6.7.0 → sae_lens-6.9.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sae-lens"
-version = "6.7.0"
+version = "6.9.0"
 description = "Training and Analyzing Sparse Autoencoders (SAEs)"
 authors = ["Joseph Bloom"]
 readme = "README.md"
@@ -33,7 +33,6 @@ pyyaml = "^6.0.1"
 typing-extensions = "^4.10.0"
 simple-parsing = "^0.1.6"
 tenacity = ">=9.0.0"
-automated-interpretability = { version = ">=0.0.5,<1.0.0", optional = true }
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^8.0.2"
@@ -56,11 +55,9 @@ ruff = "^0.7.4"
 eai-sparsify = "^1.1.1"
 mike = "^2.0.0"
 trio = "^0.30.0"
-automated-interpretability = "^0.0.13"
 
 [tool.poetry.extras]
 mamba = ["mamba-lens"]
-neuronpedia = ["automated-interpretability"]
 
 [tool.ruff.lint]
 exclude = ["*.ipynb"]
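With the neuronpedia extra gone from [tool.poetry.extras], installing sae-lens[neuronpedia] no longer pulls in automated-interpretability; mamba is now the only remaining extra.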
sae_lens-6.9.0/sae_lens/analysis/neuronpedia_integration.py (new file)

@@ -0,0 +1,163 @@
+import json
+import urllib.parse
+import webbrowser
+from typing import Any
+
+import requests
+from dotenv import load_dotenv
+
+from sae_lens import SAE, logger
+
+NEURONPEDIA_DOMAIN = "https://neuronpedia.org"
+
+# Constants for replacing NaNs and Infs in outputs
+POSITIVE_INF_REPLACEMENT = 9999
+NEGATIVE_INF_REPLACEMENT = -9999
+NAN_REPLACEMENT = 0
+OTHER_INVALID_REPLACEMENT = -99999
+
+# Pick up OPENAI_API_KEY from environment variable
+load_dotenv()
+
+
+def NanAndInfReplacer(value: str):
+    """Replace NaNs and Infs in outputs."""
+    replacements = {
+        "-Infinity": NEGATIVE_INF_REPLACEMENT,
+        "Infinity": POSITIVE_INF_REPLACEMENT,
+        "NaN": NAN_REPLACEMENT,
+    }
+    if value in replacements:
+        replaced_value = replacements[value]
+        return float(replaced_value)
+    return NAN_REPLACEMENT
+
+
+def open_neuronpedia_feature_dashboard(sae: SAE[Any], index: int):
+    sae_id = sae.cfg.metadata.neuronpedia_id
+    if sae_id is None:
+        logger.warning(
+            "SAE does not have a Neuronpedia ID. Either dashboards for this SAE do not exist (yet) on Neuronpedia, or the SAE was not loaded via the from_pretrained method"
+        )
+    else:
+        url = f"{NEURONPEDIA_DOMAIN}/{sae_id}/{index}"
+        webbrowser.open(url)
+
+
+def get_neuronpedia_quick_list(
+    sae: SAE[Any],
+    features: list[int],
+    name: str = "temporary_list",
+):
+    sae_id = sae.cfg.metadata.neuronpedia_id
+    if sae_id is None:
+        logger.warning(
+            "SAE does not have a Neuronpedia ID. Either dashboards for this SAE do not exist (yet) on Neuronpedia, or the SAE was not loaded via the from_pretrained method"
+        )
+    assert sae_id is not None
+
+    url = NEURONPEDIA_DOMAIN + "/quick-list/"
+    name = urllib.parse.quote(name)
+    url = url + "?name=" + name
+    list_feature = [
+        {
+            "modelId": sae.cfg.metadata.model_name,
+            "layer": sae_id.split("/")[1],
+            "index": str(feature),
+        }
+        for feature in features
+    ]
+    url = url + "&features=" + urllib.parse.quote(json.dumps(list_feature))
+    webbrowser.open(url)
+
+    return url
+
+
+def get_neuronpedia_feature(
+    feature: int, layer: int, model: str = "gpt2-small", dataset: str = "res-jb"
+) -> dict[str, Any]:
+    """Fetch a feature from Neuronpedia API."""
+    url = f"{NEURONPEDIA_DOMAIN}/api/feature/{model}/{layer}-{dataset}/{feature}"
+    result = requests.get(url).json()
+    result["index"] = int(result["index"])
+    return result
+
+
+class NeuronpediaActivation:
+    """Represents an activation from Neuronpedia."""
+
+    def __init__(self, id: str, tokens: list[str], act_values: list[float]):
+        self.id = id
+        self.tokens = tokens
+        self.act_values = act_values
+
+
+class NeuronpediaFeature:
+    """Represents a feature from Neuronpedia."""
+
+    def __init__(
+        self,
+        modelId: str,
+        layer: int,
+        dataset: str,
+        feature: int,
+        description: str = "",
+        activations: list[NeuronpediaActivation] | None = None,
+        autointerp_explanation: str = "",
+        autointerp_explanation_score: float = 0.0,
+    ):
+        self.modelId = modelId
+        self.layer = layer
+        self.dataset = dataset
+        self.feature = feature
+        self.description = description
+        self.activations = activations
+        self.autointerp_explanation = autointerp_explanation
+        self.autointerp_explanation_score = autointerp_explanation_score
+
+    def has_activating_text(self) -> bool:
+        """Check if the feature has activating text."""
+        if self.activations is None:
+            return False
+        return any(max(activation.act_values) > 0 for activation in self.activations)
+
+
+def make_neuronpedia_list_with_features(
+    api_key: str,
+    list_name: str,
+    features: list[NeuronpediaFeature],
+    list_description: str | None = None,
+    open_browser: bool = True,
+):
+    url = NEURONPEDIA_DOMAIN + "/api/list/new-with-features"
+
+    # make POST json request with body
+    body = {
+        "name": list_name,
+        "description": list_description,
+        "features": [
+            {
+                "modelId": feature.modelId,
+                "layer": f"{feature.layer}-{feature.dataset}",
+                "index": feature.feature,
+                "description": feature.description,
+            }
+            for feature in features
+        ],
+    }
+    response = requests.post(url, json=body, headers={"x-api-key": api_key})
+    result = response.json()
+
+    if "url" in result and open_browser:
+        webbrowser.open(result["url"])
+        return result["url"]
+    raise Exception("Error in creating list: " + result["message"])
+
+
+def test_key(api_key: str):
+    """Test the validity of the Neuronpedia API key."""
+    url = f"{NEURONPEDIA_DOMAIN}/api/test"
+    body = {"apiKey": api_key}
+    response = requests.post(url, json=body)
+    if response.status_code != 200:
+        raise Exception("Neuronpedia API key is not valid.")
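The helpers kept in the rewritten module are thin wrappers around Neuronpedia URLs and its REST API. A minimal usage sketch follows; it is illustrative only, assumes the 6.x SAE.from_pretrained signature, and the release and feature ids shown are just examples:

from sae_lens import SAE
from sae_lens.analysis.neuronpedia_integration import (
    get_neuronpedia_feature,
    get_neuronpedia_quick_list,
    open_neuronpedia_feature_dashboard,
)

# Loading via from_pretrained populates cfg.metadata.neuronpedia_id,
# which the dashboard and quick-list helpers require.
sae = SAE.from_pretrained("gpt2-small-res-jb", "blocks.8.hook_resid_pre")

# Open the Neuronpedia dashboard for feature 123 in the default browser.
open_neuronpedia_feature_dashboard(sae, index=123)

# Build (and open) a temporary quick-list URL for a handful of features.
url = get_neuronpedia_quick_list(sae, features=[123, 456], name="my quick list")

# Fetch raw feature data over the REST API; the defaults (gpt2-small,
# res-jb) match the function signature in the diff above.
feature_data = get_neuronpedia_feature(feature=123, layer=8)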
{sae_lens-6.7.0 → sae_lens-6.9.0}/sae_lens/pretrained_saes.yaml

@@ -14744,4 +14744,88 @@ mwhanna-qwen3-0.6b-transcoders-lowl0:
     neuronpedia: qwen3-0.6b/26-transcoder-hp-lowl0
   - id: layer_27
     path: layer_27.safetensors
-    neuronpedia: qwen3-0.6b/27-transcoder-hp-lowl0
+    neuronpedia: qwen3-0.6b/27-transcoder-hp-lowl0
+
+mntss-gemma-2-2b-2.5m-clt-as-per-layer:
+  conversion_func: mntss_clt_layer_transcoder
+  model: gemma-2-2b
+  repo_id: mntss/clt-gemma-2-2b-2.5M
+  saes:
+  - id: layer_0
+    path: 0
+    neuronpedia: gemma-2-2b/0-clt-hp
+  - id: layer_1
+    path: 1
+    neuronpedia: gemma-2-2b/1-clt-hp
+  - id: layer_2
+    path: 2
+    neuronpedia: gemma-2-2b/2-clt-hp
+  - id: layer_3
+    path: 3
+    neuronpedia: gemma-2-2b/3-clt-hp
+  - id: layer_4
+    path: 4
+    neuronpedia: gemma-2-2b/4-clt-hp
+  - id: layer_5
+    path: 5
+    neuronpedia: gemma-2-2b/5-clt-hp
+  - id: layer_6
+    path: 6
+    neuronpedia: gemma-2-2b/6-clt-hp
+  - id: layer_7
+    path: 7
+    neuronpedia: gemma-2-2b/7-clt-hp
+  - id: layer_8
+    path: 8
+    neuronpedia: gemma-2-2b/8-clt-hp
+  - id: layer_9
+    path: 9
+    neuronpedia: gemma-2-2b/9-clt-hp
+  - id: layer_10
+    path: 10
+    neuronpedia: gemma-2-2b/10-clt-hp
+  - id: layer_11
+    path: 11
+    neuronpedia: gemma-2-2b/11-clt-hp
+  - id: layer_12
+    path: 12
+    neuronpedia: gemma-2-2b/12-clt-hp
+  - id: layer_13
+    path: 13
+    neuronpedia: gemma-2-2b/13-clt-hp
+  - id: layer_14
+    path: 14
+    neuronpedia: gemma-2-2b/14-clt-hp
+  - id: layer_15
+    path: 15
+    neuronpedia: gemma-2-2b/15-clt-hp
+  - id: layer_16
+    path: 16
+    neuronpedia: gemma-2-2b/16-clt-hp
+  - id: layer_17
+    path: 17
+    neuronpedia: gemma-2-2b/17-clt-hp
+  - id: layer_18
+    path: 18
+    neuronpedia: gemma-2-2b/18-clt-hp
+  - id: layer_19
+    path: 19
+    neuronpedia: gemma-2-2b/19-clt-hp
+  - id: layer_20
+    path: 20
+    neuronpedia: gemma-2-2b/20-clt-hp
+  - id: layer_21
+    path: 21
+    neuronpedia: gemma-2-2b/21-clt-hp
+  - id: layer_22
+    path: 22
+    neuronpedia: gemma-2-2b/22-clt-hp
+  - id: layer_23
+    path: 23
+    neuronpedia: gemma-2-2b/23-clt-hp
+  - id: layer_24
+    path: 24
+    neuronpedia: gemma-2-2b/24-clt-hp
+  - id: layer_25
+    path: 25
+    neuronpedia: gemma-2-2b/25-clt-hp
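Each entry in pretrained_saes.yaml is a loadable release, so the new block should make these per-layer CLT transcoders addressable by release name and SAE id. A hedged sketch, assuming transcoder releases load through the usual registry path:

from sae_lens import SAE

# Release and id strings come straight from the YAML entry above; the
# conversion_func (mntss_clt_layer_transcoder) is applied during loading.
sae = SAE.from_pretrained(
    release="mntss-gemma-2-2b-2.5m-clt-as-per-layer",
    sae_id="layer_0",
)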
sae_lens-6.7.0/sae_lens/analysis/neuronpedia_integration.py (deleted file)

@@ -1,510 +0,0 @@
-import asyncio
-import json
-import os
-import urllib.parse
-import webbrowser
-from datetime import datetime
-from typing import Any, TypeVar
-
-import requests
-from dotenv import load_dotenv
-from tenacity import retry, stop_after_attempt, wait_random_exponential
-
-from sae_lens import SAE, logger
-
-NEURONPEDIA_DOMAIN = "https://neuronpedia.org"
-
-# Constants for replacing NaNs and Infs in outputs
-POSITIVE_INF_REPLACEMENT = 9999
-NEGATIVE_INF_REPLACEMENT = -9999
-NAN_REPLACEMENT = 0
-OTHER_INVALID_REPLACEMENT = -99999
-
-# Pick up OPENAI_API_KEY from environment variable
-load_dotenv()
-
-
-def NanAndInfReplacer(value: str):
-    """Replace NaNs and Infs in outputs."""
-    replacements = {
-        "-Infinity": NEGATIVE_INF_REPLACEMENT,
-        "Infinity": POSITIVE_INF_REPLACEMENT,
-        "NaN": NAN_REPLACEMENT,
-    }
-    if value in replacements:
-        replaced_value = replacements[value]
-        return float(replaced_value)
-    return NAN_REPLACEMENT
-
-
-def open_neuronpedia_feature_dashboard(sae: SAE[Any], index: int):
-    sae_id = sae.cfg.metadata.neuronpedia_id
-    if sae_id is None:
-        logger.warning(
-            "SAE does not have a Neuronpedia ID. Either dashboards for this SAE do not exist (yet) on Neuronpedia, or the SAE was not loaded via the from_pretrained method"
-        )
-    else:
-        url = f"{NEURONPEDIA_DOMAIN}/{sae_id}/{index}"
-        webbrowser.open(url)
-
-
-def get_neuronpedia_quick_list(
-    sae: SAE[Any],
-    features: list[int],
-    name: str = "temporary_list",
-):
-    sae_id = sae.cfg.metadata.neuronpedia_id
-    if sae_id is None:
-        logger.warning(
-            "SAE does not have a Neuronpedia ID. Either dashboards for this SAE do not exist (yet) on Neuronpedia, or the SAE was not loaded via the from_pretrained method"
-        )
-    assert sae_id is not None
-
-    url = NEURONPEDIA_DOMAIN + "/quick-list/"
-    name = urllib.parse.quote(name)
-    url = url + "?name=" + name
-    list_feature = [
-        {
-            "modelId": sae.cfg.metadata.model_name,
-            "layer": sae_id.split("/")[1],
-            "index": str(feature),
-        }
-        for feature in features
-    ]
-    url = url + "&features=" + urllib.parse.quote(json.dumps(list_feature))
-    webbrowser.open(url)
-
-    return url
-
-
-def get_neuronpedia_feature(
-    feature: int, layer: int, model: str = "gpt2-small", dataset: str = "res-jb"
-) -> dict[str, Any]:
-    """Fetch a feature from Neuronpedia API."""
-    url = f"{NEURONPEDIA_DOMAIN}/api/feature/{model}/{layer}-{dataset}/{feature}"
-    result = requests.get(url).json()
-    result["index"] = int(result["index"])
-    return result
-
-
-class NeuronpediaActivation:
-    """Represents an activation from Neuronpedia."""
-
-    def __init__(self, id: str, tokens: list[str], act_values: list[float]):
-        self.id = id
-        self.tokens = tokens
-        self.act_values = act_values
-
-
-class NeuronpediaFeature:
-    """Represents a feature from Neuronpedia."""
-
-    def __init__(
-        self,
-        modelId: str,
-        layer: int,
-        dataset: str,
-        feature: int,
-        description: str = "",
-        activations: list[NeuronpediaActivation] | None = None,
-        autointerp_explanation: str = "",
-        autointerp_explanation_score: float = 0.0,
-    ):
-        self.modelId = modelId
-        self.layer = layer
-        self.dataset = dataset
-        self.feature = feature
-        self.description = description
-        self.activations = activations
-        self.autointerp_explanation = autointerp_explanation
-        self.autointerp_explanation_score = autointerp_explanation_score
-
-    def has_activating_text(self) -> bool:
-        """Check if the feature has activating text."""
-        if self.activations is None:
-            return False
-        return any(max(activation.act_values) > 0 for activation in self.activations)
-
-
-T = TypeVar("T")
-
-
-@retry(wait=wait_random_exponential(min=1, max=500), stop=stop_after_attempt(10))
-def sleep_identity(x: T) -> T:
-    """Dummy function for retrying."""
-    return x
-
-
-@retry(wait=wait_random_exponential(min=1, max=500), stop=stop_after_attempt(10))
-async def simulate_and_score(  # type: ignore
-    simulator: Any,
-    activation_records: list[Any],
-) -> Any:
-    """Score an explanation of a neuron by how well it predicts activations on the given text sequences."""
-    try:
-        from neuron_explainer.explanations.scoring import (
-            _simulate_and_score_sequence,
-            aggregate_scored_sequence_simulations,
-        )
-    except ImportError as e:
-        raise ImportError(
-            "The neuron_explainer package is required to use this function. "
-            "Please install SAELens with the neuronpedia optional dependencies: "
-            "pip install sae-lens[neuronpedia]"
-        ) from e
-
-    scored_sequence_simulations = await asyncio.gather(
-        *[
-            sleep_identity(
-                _simulate_and_score_sequence(
-                    simulator,
-                    activation_record,
-                )
-            )
-            for activation_record in activation_records
-        ]
-    )
-    return aggregate_scored_sequence_simulations(scored_sequence_simulations)
-
-
-def make_neuronpedia_list_with_features(
-    api_key: str,
-    list_name: str,
-    features: list[NeuronpediaFeature],
-    list_description: str | None = None,
-    open_browser: bool = True,
-):
-    url = NEURONPEDIA_DOMAIN + "/api/list/new-with-features"
-
-    # make POST json request with body
-    body = {
-        "name": list_name,
-        "description": list_description,
-        "features": [
-            {
-                "modelId": feature.modelId,
-                "layer": f"{feature.layer}-{feature.dataset}",
-                "index": feature.feature,
-                "description": feature.description,
-            }
-            for feature in features
-        ],
-    }
-    response = requests.post(url, json=body, headers={"x-api-key": api_key})
-    result = response.json()
-
-    if "url" in result and open_browser:
-        webbrowser.open(result["url"])
-        return result["url"]
-    raise Exception("Error in creating list: " + result["message"])
-
-
-def test_key(api_key: str):
-    """Test the validity of the Neuronpedia API key."""
-    url = f"{NEURONPEDIA_DOMAIN}/api/test"
-    body = {"apiKey": api_key}
-    response = requests.post(url, json=body)
-    if response.status_code != 200:
-        raise Exception("Neuronpedia API key is not valid.")
-
-
-async def autointerp_neuronpedia_features(  # noqa: C901
-    features: list[NeuronpediaFeature],
-    openai_api_key: str | None = None,
-    autointerp_retry_attempts: int = 3,
-    autointerp_score_max_concurrent: int = 20,
-    neuronpedia_api_key: str | None = None,
-    skip_neuronpedia_api_key_test: bool = False,
-    do_score: bool = True,
-    output_dir: str = "neuronpedia_outputs/autointerp",
-    num_activations_to_use: int = 20,
-    max_explanation_activation_records: int = 20,
-    upload_to_neuronpedia: bool = True,
-    autointerp_explainer_model_name: str = "gpt-4-1106-preview",
-    autointerp_scorer_model_name: str | None = "gpt-3.5-turbo",
-    save_to_disk: bool = True,
-):
-    """
-    Autointerp Neuronpedia features.
-
-    Args:
-        features: List of NeuronpediaFeature objects.
-        openai_api_key: OpenAI API key.
-        autointerp_retry_attempts: Number of retry attempts for autointerp.
-        autointerp_score_max_concurrent: Maximum number of concurrent requests for autointerp scoring.
-        neuronpedia_api_key: Neuronpedia API key.
-        do_score: Whether to score the features.
-        output_dir: Output directory for saving the results.
-        num_activations_to_use: Number of activations to use.
-        max_explanation_activation_records: Maximum number of activation records for explanation.
-        upload_to_neuronpedia: Whether to upload the results to Neuronpedia.
-        autointerp_explainer_model_name: Model name for autointerp explainer.
-        autointerp_scorer_model_name: Model name for autointerp scorer.
-
-    Returns:
-        None
-    """
-    try:
-        from neuron_explainer.activations.activation_records import (
-            calculate_max_activation,
-        )
-        from neuron_explainer.activations.activations import ActivationRecord
-        from neuron_explainer.explanations.calibrated_simulator import (
-            UncalibratedNeuronSimulator,
-        )
-        from neuron_explainer.explanations.explainer import (
-            HARMONY_V4_MODELS,
-            ContextSize,
-            TokenActivationPairExplainer,
-        )
-        from neuron_explainer.explanations.few_shot_examples import FewShotExampleSet
-        from neuron_explainer.explanations.prompt_builder import PromptFormat
-        from neuron_explainer.explanations.simulator import (
-            LogprobFreeExplanationTokenSimulator,
-        )
-    except ImportError as e:
-        raise ImportError(
-            "The automated-interpretability package is required to use autointerp functionality. "
-            "Please install SAELens with the neuronpedia optional dependencies: "
-            "pip install sae-lens[neuronpedia]"
-        ) from e
-
-    logger.info("\n\n")
-
-    if os.getenv("OPENAI_API_KEY") is None:
-        if openai_api_key is None:
-            raise Exception(
-                "You need to provide an OpenAI API key either in environment variable OPENAI_API_KEY or as an argument."
-            )
-        os.environ["OPENAI_API_KEY"] = openai_api_key
-
-    if autointerp_explainer_model_name not in HARMONY_V4_MODELS:
-        raise Exception(
-            f"Invalid explainer model name: {autointerp_explainer_model_name}. Must be one of: {HARMONY_V4_MODELS}"
-        )
-
-    if do_score and autointerp_scorer_model_name not in HARMONY_V4_MODELS:
-        raise Exception(
-            f"Invalid scorer model name: {autointerp_scorer_model_name}. Must be one of: {HARMONY_V4_MODELS}"
-        )
-
-    if upload_to_neuronpedia:
-        if neuronpedia_api_key is None:
-            raise Exception(
-                "You need to provide a Neuronpedia API key to upload the results to Neuronpedia."
-            )
-        if not skip_neuronpedia_api_key_test:
-            test_key(neuronpedia_api_key)
-
-    logger.info("\n\n=== Step 1) Fetching features from Neuronpedia")
-    for feature in features:
-        feature_data = get_neuronpedia_feature(
-            feature=feature.feature,
-            layer=feature.layer,
-            model=feature.modelId,
-            dataset=feature.dataset,
-        )
-
-        if "modelId" not in feature_data:
-            raise Exception(
-                f"Feature {feature.feature} in layer {feature.layer} of model {feature.modelId} and dataset {feature.dataset} does not exist."
-            )
-
-        if "activations" not in feature_data or len(feature_data["activations"]) == 0:
-            raise Exception(
-                f"Feature {feature.feature} in layer {feature.layer} of model {feature.modelId} and dataset {feature.dataset} does not have activations."
-            )
-
-        activations = feature_data["activations"]
-        activations_to_add = []
-        for activation in activations:
-            if len(activations_to_add) < num_activations_to_use:
-                activations_to_add.append(
-                    NeuronpediaActivation(
-                        id=activation["id"],
-                        tokens=activation["tokens"],
-                        act_values=activation["values"],
-                    )
-                )
-        feature.activations = activations_to_add
-
-        if not feature.has_activating_text():
-            raise Exception(
-                f"Feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature} appears dead - it does not have activating text."
-            )
-
-    for iteration_num, feature in enumerate(features):
-        start_time = datetime.now()
-
-        logger.info(
-            f"\n========== Feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature} ({iteration_num + 1} of {len(features)} Features) =========="
-        )
-        logger.info(
-            f"\n=== Step 2) Explaining feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}"
-        )
-
-        if feature.activations is None:
-            feature.activations = []
-        activation_records = [
-            ActivationRecord(
-                tokens=activation.tokens,  # type: ignore
-                activations=activation.act_values,  # type: ignore
-            )  # type: ignore
-            for activation in feature.activations
-        ]
-
-        activation_records_explaining = activation_records[
-            :max_explanation_activation_records
-        ]
-
-        explainer = TokenActivationPairExplainer(
-            model_name=autointerp_explainer_model_name,
-            prompt_format=PromptFormat.HARMONY_V4,
-            context_size=ContextSize.SIXTEEN_K,
-            max_concurrent=1,
-        )
-
-        explanations = []
-        for _ in range(autointerp_retry_attempts):
-            try:
-                explanations = await explainer.generate_explanations(
-                    all_activation_records=activation_records_explaining,
-                    max_activation=calculate_max_activation(
-                        activation_records_explaining
-                    ),
-                    num_samples=1,
-                )
-            except Exception as e:
-                logger.error(f"ERROR, RETRYING: {e}")
-            else:
-                break
-        else:
-            logger.error(
-                f"ERROR: Failed to explain feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}"
-            )
-
-        if len(explanations) != 1:
-            raise ValueError(
-                f"Expected exactly one explanation but got {len(explanations)}. This may indicate an issue with the explainer's response."
-            )
-        explanation = explanations[0].rstrip(".")
-        logger.info(
-            f"===== {autointerp_explainer_model_name}'s explanation: {explanation}"
-        )
-        feature.autointerp_explanation = explanation
-
-        scored_simulation = None
-        if do_score and autointerp_scorer_model_name:
-            logger.info(
-                f"\n=== Step 3) Scoring feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}"
-            )
-            logger.info("=== This can take up to 30 seconds.")
-
-            temp_activation_records = [
-                ActivationRecord(
-                    tokens=[  # type: ignore
-                        token.replace("<|endoftext|>", "<|not_endoftext|>")
-                        .replace(" 55", "_55")
-                        .encode("ascii", errors="backslashreplace")
-                        .decode("ascii")
-                        for token in activation_record.tokens  # type: ignore
-                    ],
-                    activations=activation_record.activations,  # type: ignore
-                )  # type: ignore
-                for activation_record in activation_records
-            ]
-
-            score = None
-            scored_simulation = None
-            for _ in range(autointerp_retry_attempts):
-                try:
-                    simulator = UncalibratedNeuronSimulator(
-                        LogprobFreeExplanationTokenSimulator(
-                            autointerp_scorer_model_name,
-                            explanation,
-                            json_mode=True,
-                            max_concurrent=autointerp_score_max_concurrent,
-                            few_shot_example_set=FewShotExampleSet.JL_FINE_TUNED,
-                            prompt_format=PromptFormat.HARMONY_V4,
-                        )
-                    )
-                    scored_simulation = await simulate_and_score(
-                        simulator, temp_activation_records
-                    )
-                    score = scored_simulation.get_preferred_score()
-                except Exception as e:
-                    logger.error(f"ERROR, RETRYING: {e}")
-                else:
-                    break
-
-            if (
-                score is None
-                or scored_simulation is None
-                or len(scored_simulation.scored_sequence_simulations)
-                != num_activations_to_use
-            ):
-                logger.error(
-                    f"ERROR: Failed to score feature {feature.modelId}@{feature.layer}-{feature.dataset}:{feature.feature}. Skipping it."
-                )
-                continue
-            feature.autointerp_explanation_score = score
-            logger.info(
-                f"===== {autointerp_scorer_model_name}'s score: {(score * 100):.0f}"
-            )
-
-        else:
-            logger.info("=== Step 3) Skipping scoring as instructed.")
-
-        feature_data = {
-            "modelId": feature.modelId,
-            "layer": f"{feature.layer}-{feature.dataset}",
-            "index": feature.feature,
-            "explanation": feature.autointerp_explanation,
-            "explanationScore": feature.autointerp_explanation_score,
-            "explanationModel": autointerp_explainer_model_name,
-        }
-        if do_score and autointerp_scorer_model_name and scored_simulation:
-            feature_data["activations"] = feature.activations
-            feature_data["simulationModel"] = autointerp_scorer_model_name
-            feature_data["simulationActivations"] = (
-                scored_simulation.scored_sequence_simulations
-            )  # type: ignore
-            feature_data["simulationScore"] = feature.autointerp_explanation_score
-        feature_data_str = json.dumps(feature_data, default=vars)
-
-        if save_to_disk:
-            output_file = f"{output_dir}/{feature.modelId}-{feature.layer}-{feature.dataset}_feature-{feature.feature}_time-{datetime.now().strftime('%Y%m%d-%H%M%S')}.jsonl"
-            os.makedirs(output_dir, exist_ok=True)
-            logger.info(f"\n=== Step 4) Saving feature to {output_file}")
-            with open(output_file, "a") as f:
-                f.write(feature_data_str)
-                f.write("\n")
-        else:
-            logger.info("\n=== Step 4) Skipping saving to disk.")
-
-        if upload_to_neuronpedia:
-            logger.info("\n=== Step 5) Uploading feature to Neuronpedia")
-            upload_data = json.dumps(
-                {
-                    "feature": feature_data,
-                },
-                default=vars,
-            )
-            upload_data_json = json.loads(upload_data, parse_constant=NanAndInfReplacer)
-            url = f"{NEURONPEDIA_DOMAIN}/api/explanation/new"
-            response = requests.post(
-                url, json=upload_data_json, headers={"x-api-key": neuronpedia_api_key}
-            )
-            if response.status_code != 200:
-                logger.error(
-                    f"ERROR: Couldn't upload explanation to Neuronpedia: {response.text}"
-                )
-            else:
-                logger.info(
-                    f"===== Uploaded to Neuronpedia: {NEURONPEDIA_DOMAIN}/{feature.modelId}/{feature.layer}-{feature.dataset}/{feature.feature}"
-                )
-
-        end_time = datetime.now()
-        logger.info(f"\n========== Time Spent for Feature: {end_time - start_time}\n")
-
-    logger.info("\n\n========== Generation and Upload Complete ==========\n\n")