cc-context-stats 1.6.2 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +39 -0
  2. package/CLAUDE.md +12 -0
  3. package/README.md +34 -24
  4. package/docs/ARCHITECTURE.md +52 -25
  5. package/docs/CSV_FORMAT.md +2 -0
  6. package/docs/DEPLOYMENT.md +19 -8
  7. package/docs/DEVELOPMENT.md +48 -12
  8. package/docs/MODEL_INTELLIGENCE.md +396 -0
  9. package/docs/configuration.md +35 -0
  10. package/docs/context-stats.md +12 -1
  11. package/docs/installation.md +82 -22
  12. package/docs/scripts.md +47 -23
  13. package/docs/troubleshooting.md +93 -4
  14. package/package.json +1 -1
  15. package/pyproject.toml +1 -1
  16. package/scripts/statusline-full.sh +171 -37
  17. package/scripts/statusline.js +214 -32
  18. package/scripts/statusline.py +195 -47
  19. package/src/claude_statusline/__init__.py +1 -1
  20. package/src/claude_statusline/cli/context_stats.py +85 -13
  21. package/src/claude_statusline/cli/explain.py +228 -0
  22. package/src/claude_statusline/cli/statusline.py +41 -30
  23. package/src/claude_statusline/core/colors.py +78 -9
  24. package/src/claude_statusline/core/config.py +68 -9
  25. package/src/claude_statusline/core/git.py +16 -5
  26. package/src/claude_statusline/graphs/intelligence.py +162 -0
  27. package/src/claude_statusline/graphs/renderer.py +38 -3
  28. package/tests/bash/test_statusline_full.bats +5 -5
  29. package/tests/fixtures/mi_test_vectors.json +140 -0
  30. package/tests/node/intelligence.test.js +98 -0
  31. package/tests/node/statusline.test.js +4 -4
  32. package/tests/python/test_colors.py +105 -0
  33. package/tests/python/test_config_colors.py +78 -0
  34. package/tests/python/test_explain.py +177 -0
  35. package/tests/python/test_intelligence.py +314 -0
  36. package/tests/python/test_layout.py +4 -4
  37. package/tests/python/test_statusline.py +4 -4
@@ -0,0 +1,78 @@
1
+ """Tests for color configuration in Config."""
2
+
3
+ from claude_statusline.core.config import Config
4
+
5
+
6
+ class TestConfigColorOverrides:
7
+ """Tests for loading color overrides from config file."""
8
+
9
+ def test_no_color_overrides_by_default(self, tmp_path):
10
+ config_file = tmp_path / "statusline.conf"
11
+ config_file.write_text("autocompact=true\n")
12
+ config = Config.load(config_path=config_file)
13
+ assert config.color_overrides == {}
14
+
15
+ def test_named_color_override(self, tmp_path):
16
+ config_file = tmp_path / "statusline.conf"
17
+ config_file.write_text("color_green=bright_cyan\n")
18
+ config = Config.load(config_path=config_file)
19
+ assert "green" in config.color_overrides
20
+ assert config.color_overrides["green"] == "\033[0;96m"
21
+
22
+ def test_hex_color_override(self, tmp_path):
23
+ config_file = tmp_path / "statusline.conf"
24
+ config_file.write_text("color_red=#f7768e\n")
25
+ config = Config.load(config_path=config_file)
26
+ assert "red" in config.color_overrides
27
+ assert config.color_overrides["red"] == "\033[38;2;247;118;142m"
28
+
29
+ def test_multiple_color_overrides(self, tmp_path):
30
+ config_file = tmp_path / "statusline.conf"
31
+ config_file.write_text("color_green=#7dcfff\ncolor_red=#f7768e\ncolor_blue=bright_blue\n")
32
+ config = Config.load(config_path=config_file)
33
+ assert len(config.color_overrides) == 3
34
+ assert "green" in config.color_overrides
35
+ assert "red" in config.color_overrides
36
+ assert "blue" in config.color_overrides
37
+
38
+ def test_invalid_color_ignored(self, tmp_path, capsys):
39
+ config_file = tmp_path / "statusline.conf"
40
+ config_file.write_text("color_green=nonexistent_color\n")
41
+ config = Config.load(config_path=config_file)
42
+ assert config.color_overrides == {}
43
+
44
+ def test_color_overrides_mixed_with_booleans(self, tmp_path):
45
+ config_file = tmp_path / "statusline.conf"
46
+ config_file.write_text("autocompact=false\ntoken_detail=true\ncolor_yellow=#e0af68\n")
47
+ config = Config.load(config_path=config_file)
48
+ assert config.autocompact is False
49
+ assert config.token_detail is True
50
+ assert "yellow" in config.color_overrides
51
+ assert config.color_overrides["yellow"] == "\033[38;2;224;175;104m"
52
+
53
+ def test_color_overrides_in_to_dict(self, tmp_path):
54
+ config_file = tmp_path / "statusline.conf"
55
+ config_file.write_text("color_cyan=#00ffff\n")
56
+ config = Config.load(config_path=config_file)
57
+ d = config.to_dict()
58
+ assert "color_overrides" in d
59
+ assert "cyan" in d["color_overrides"]
60
+
61
+ def test_unknown_color_key_ignored(self, tmp_path):
62
+ config_file = tmp_path / "statusline.conf"
63
+ config_file.write_text("color_purple=magenta\n")
64
+ config = Config.load(config_path=config_file)
65
+ assert config.color_overrides == {}
66
+
67
+ def test_all_six_color_slots(self, tmp_path):
68
+ config_file = tmp_path / "statusline.conf"
69
+ config_file.write_text(
70
+ "color_green=green\n"
71
+ "color_yellow=yellow\n"
72
+ "color_red=red\n"
73
+ "color_blue=blue\n"
74
+ "color_magenta=magenta\n"
75
+ "color_cyan=cyan\n"
76
+ )
77
+ config = Config.load(config_path=config_file)
78
+ assert len(config.color_overrides) == 6
@@ -0,0 +1,177 @@
1
+ """Tests for the context-stats explain command."""
2
+
3
+ import json
4
+ import subprocess
5
+ import sys
6
+ from pathlib import Path
7
+
8
+ PROJECT_ROOT = Path(__file__).parent.parent.parent
9
+ FIXTURES_DIR = PROJECT_ROOT / "tests" / "fixtures" / "json"
10
+
11
+
12
+ class TestExplainCommand:
13
+ """Tests for `context-stats explain`."""
14
+
15
+ def _run_explain(self, input_data, extra_args=None):
16
+ """Run context-stats explain with JSON input and return stdout."""
17
+ cmd = [sys.executable, "-m", "claude_statusline.cli.context_stats", "explain"]
18
+ if extra_args:
19
+ cmd.extend(extra_args)
20
+ result = subprocess.run(
21
+ cmd,
22
+ input=json.dumps(input_data),
23
+ capture_output=True,
24
+ text=True,
25
+ timeout=10,
26
+ )
27
+ return result
28
+
29
+ def test_explain_shows_model(self):
30
+ data = {"model": {"display_name": "Opus 4.5", "id": "claude-opus-4-5"}}
31
+ result = self._run_explain(data)
32
+ assert result.returncode == 0
33
+ assert "Opus 4.5" in result.stdout
34
+ assert "claude-opus-4-5" in result.stdout
35
+
36
+ def test_explain_shows_workspace(self):
37
+ data = {
38
+ "workspace": {
39
+ "current_dir": "/home/user/project",
40
+ "project_dir": "/home/user/project",
41
+ }
42
+ }
43
+ result = self._run_explain(data)
44
+ assert result.returncode == 0
45
+ assert "/home/user/project" in result.stdout
46
+
47
+ def test_explain_shows_context_window(self):
48
+ data = {
49
+ "context_window": {
50
+ "context_window_size": 200000,
51
+ "current_usage": {
52
+ "input_tokens": 50000,
53
+ "cache_creation_input_tokens": 10000,
54
+ "cache_read_input_tokens": 20000,
55
+ },
56
+ }
57
+ }
58
+ result = self._run_explain(data)
59
+ assert result.returncode == 0
60
+ assert "200,000" in result.stdout
61
+ assert "50,000" in result.stdout
62
+ assert "context_used" in result.stdout
63
+
64
+ def test_explain_shows_cost(self):
65
+ data = {
66
+ "cost": {
67
+ "total_cost_usd": 0.1234,
68
+ "total_lines_added": 100,
69
+ "total_lines_removed": 50,
70
+ }
71
+ }
72
+ result = self._run_explain(data)
73
+ assert result.returncode == 0
74
+ assert "$0.1234" in result.stdout
75
+
76
+ def test_explain_shows_session(self):
77
+ data = {"session_id": "abc-123", "version": "2.0.0"}
78
+ result = self._run_explain(data)
79
+ assert result.returncode == 0
80
+ assert "abc-123" in result.stdout
81
+ assert "2.0.0" in result.stdout
82
+
83
+ def test_explain_shows_absent_fields(self):
84
+ data = {}
85
+ result = self._run_explain(data)
86
+ assert result.returncode == 0
87
+ assert "(absent)" in result.stdout
88
+
89
+ def test_explain_shows_raw_json(self):
90
+ data = {"model": {"display_name": "Test"}}
91
+ result = self._run_explain(data)
92
+ assert result.returncode == 0
93
+ assert "Raw JSON" in result.stdout
94
+ assert '"display_name": "Test"' in result.stdout
95
+
96
+ def test_explain_shows_config(self):
97
+ data = {}
98
+ result = self._run_explain(data)
99
+ assert result.returncode == 0
100
+ assert "Active Config" in result.stdout
101
+
102
+ def test_explain_with_full_fixture(self):
103
+ with open(FIXTURES_DIR / "valid_full.json") as f:
104
+ data = json.load(f)
105
+ result = self._run_explain(data)
106
+ assert result.returncode == 0
107
+ assert "Opus 4.5" in result.stdout
108
+ assert "test-session-123" in result.stdout
109
+
110
+ def test_explain_invalid_json_fails(self):
111
+ result = subprocess.run(
112
+ [sys.executable, "-m", "claude_statusline.cli.context_stats", "explain"],
113
+ input="not valid json",
114
+ capture_output=True,
115
+ text=True,
116
+ timeout=10,
117
+ )
118
+ assert result.returncode == 1
119
+ assert "invalid JSON" in result.stderr
120
+
121
+ def test_explain_shows_derived_free_tokens(self):
122
+ data = {
123
+ "context_window": {
124
+ "context_window_size": 200000,
125
+ "current_usage": {
126
+ "input_tokens": 50000,
127
+ "cache_creation_input_tokens": 10000,
128
+ "cache_read_input_tokens": 20000,
129
+ },
130
+ }
131
+ }
132
+ result = self._run_explain(data)
133
+ assert result.returncode == 0
134
+ # 200000 - (50000+10000+20000) = 120000
135
+ assert "120,000" in result.stdout
136
+ assert "60.0%" in result.stdout
137
+
138
+ def test_explain_no_color_flag(self):
139
+ data = {"model": {"display_name": "Test"}}
140
+ result = subprocess.run(
141
+ [sys.executable, "-m", "claude_statusline.cli.context_stats", "explain", "--no-color"],
142
+ input=json.dumps(data),
143
+ capture_output=True,
144
+ text=True,
145
+ timeout=10,
146
+ )
147
+ assert result.returncode == 0
148
+ assert "Test" in result.stdout
149
+ # No ANSI escape codes when --no-color is passed
150
+ assert "\x1b[" not in result.stdout
151
+
152
+ def test_explain_shows_vim_mode(self):
153
+ data = {"vim": {"mode": "NORMAL"}}
154
+ result = self._run_explain(data)
155
+ assert result.returncode == 0
156
+ assert "NORMAL" in result.stdout
157
+ assert "Extensions" in result.stdout
158
+
159
+ def test_explain_shows_agent(self):
160
+ data = {"agent": {"name": "my-agent"}}
161
+ result = self._run_explain(data)
162
+ assert result.returncode == 0
163
+ assert "my-agent" in result.stdout
164
+ assert "Extensions" in result.stdout
165
+
166
+ def test_explain_shows_output_style(self):
167
+ data = {"output_style": {"name": "concise"}}
168
+ result = self._run_explain(data)
169
+ assert result.returncode == 0
170
+ assert "concise" in result.stdout
171
+ assert "Extensions" in result.stdout
172
+
173
+ def test_explain_no_extensions_section_when_absent(self):
174
+ data = {"model": {"display_name": "Test"}}
175
+ result = self._run_explain(data)
176
+ assert result.returncode == 0
177
+ assert "Extensions" not in result.stdout
@@ -0,0 +1,314 @@
1
+ """Tests for Model Intelligence (MI) score computation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from pathlib import Path
7
+
8
+ import pytest
9
+
10
+ from claude_statusline.core.state import StateEntry
11
+ from claude_statusline.graphs.intelligence import (
12
+ MI_GREEN_THRESHOLD,
13
+ MI_WEIGHT_CPS,
14
+ MI_WEIGHT_ES,
15
+ MI_WEIGHT_PS,
16
+ MI_YELLOW_THRESHOLD,
17
+ IntelligenceScore,
18
+ calculate_context_pressure,
19
+ calculate_efficiency,
20
+ calculate_intelligence,
21
+ calculate_productivity,
22
+ format_mi_score,
23
+ get_mi_color,
24
+ )
25
+
26
+ FIXTURES_DIR = Path(__file__).parent.parent / "fixtures"
27
+
28
+
29
+ def _make_entry(
30
+ current_input=0,
31
+ cache_creation=0,
32
+ cache_read=0,
33
+ total_output=0,
34
+ lines_added=0,
35
+ lines_removed=0,
36
+ context_window_size=200000,
37
+ ) -> StateEntry:
38
+ """Helper to create a StateEntry with sane defaults."""
39
+ return StateEntry(
40
+ timestamp=1000000,
41
+ total_input_tokens=0,
42
+ total_output_tokens=total_output,
43
+ current_input_tokens=current_input,
44
+ current_output_tokens=0,
45
+ cache_creation=cache_creation,
46
+ cache_read=cache_read,
47
+ cost_usd=0.0,
48
+ lines_added=lines_added,
49
+ lines_removed=lines_removed,
50
+ session_id="test",
51
+ model_id="test-model",
52
+ workspace_project_dir="/test",
53
+ context_window_size=context_window_size,
54
+ )
55
+
56
+
57
+ # --- CPS tests ---
58
+
59
+
60
+ class TestContextPressure:
61
+ def test_empty_context(self):
62
+ assert calculate_context_pressure(0.0) == 1.0
63
+
64
+ def test_full_context(self):
65
+ assert calculate_context_pressure(1.0) == 0.0
66
+
67
+ def test_half_context(self):
68
+ cps = calculate_context_pressure(0.5)
69
+ assert 0.64 < cps < 0.66 # ~0.646
70
+
71
+ def test_custom_beta_linear(self):
72
+ cps = calculate_context_pressure(0.5, beta=1.0)
73
+ assert cps == pytest.approx(0.5, abs=0.01)
74
+
75
+ def test_custom_beta_quadratic(self):
76
+ cps = calculate_context_pressure(0.5, beta=2.0)
77
+ assert cps == pytest.approx(0.75, abs=0.01)
78
+
79
+ def test_over_capacity_clamped(self):
80
+ cps = calculate_context_pressure(1.5)
81
+ assert cps == 0.0
82
+
83
+ def test_negative_utilization(self):
84
+ assert calculate_context_pressure(-0.1) == 1.0
85
+
86
+
87
+ # --- CPS guard clause ---
88
+
89
+
90
+ class TestGuardClause:
91
+ def test_zero_context_window(self):
92
+ entry = _make_entry(current_input=50000)
93
+ score = calculate_intelligence(entry, None, context_window_size=0)
94
+ assert score.mi == 1.0
95
+ assert score.cps == 1.0
96
+ assert score.es == 1.0
97
+ assert score.ps == 0.5
98
+ assert score.utilization == 0.0
99
+
100
+
101
+ # --- ES tests ---
102
+
103
+
104
+ class TestEfficiency:
105
+ def test_no_tokens(self):
106
+ entry = _make_entry()
107
+ assert calculate_efficiency(entry) == 1.0
108
+
109
+ def test_all_cache_read(self):
110
+ entry = _make_entry(cache_read=100000)
111
+ assert calculate_efficiency(entry) == 1.0
112
+
113
+ def test_no_cache(self):
114
+ entry = _make_entry(current_input=100000)
115
+ assert calculate_efficiency(entry) == pytest.approx(0.3, abs=0.01)
116
+
117
+ def test_mixed_cache(self):
118
+ # 60% cache read
119
+ entry = _make_entry(current_input=20000, cache_creation=20000, cache_read=60000)
120
+ es = calculate_efficiency(entry)
121
+ assert es == pytest.approx(0.3 + 0.7 * 0.6, abs=0.01)
122
+
123
+
124
+ # --- PS tests ---
125
+
126
+
127
+ class TestProductivity:
128
+ def test_no_previous_entry(self):
129
+ entry = _make_entry(total_output=1000, lines_added=100)
130
+ assert calculate_productivity(entry, None) == 0.5
131
+
132
+ def test_no_output(self):
133
+ prev = _make_entry(total_output=1000)
134
+ cur = _make_entry(total_output=1000) # no increase
135
+ assert calculate_productivity(cur, prev) == 0.5
136
+
137
+ def test_high_productivity(self):
138
+ prev = _make_entry(total_output=0, lines_added=0, lines_removed=0)
139
+ cur = _make_entry(total_output=100, lines_added=20, lines_removed=5)
140
+ ps = calculate_productivity(cur, prev)
141
+ # ratio = 25/100 = 0.25, normalized = min(1, 0.25/0.2) = 1.0
142
+ assert ps == pytest.approx(1.0, abs=0.01)
143
+
144
+ def test_zero_productivity(self):
145
+ prev = _make_entry(total_output=0, lines_added=0, lines_removed=0)
146
+ cur = _make_entry(total_output=1000, lines_added=0, lines_removed=0)
147
+ ps = calculate_productivity(cur, prev)
148
+ assert ps == pytest.approx(0.2, abs=0.01)
149
+
150
+ def test_moderate_productivity(self):
151
+ prev = _make_entry(total_output=0, lines_added=0, lines_removed=0)
152
+ cur = _make_entry(total_output=1000, lines_added=50, lines_removed=10)
153
+ ps = calculate_productivity(cur, prev)
154
+ # ratio = 60/1000 = 0.06, normalized = min(1, 0.06/0.2) = 0.3
155
+ assert ps == pytest.approx(0.2 + 0.8 * 0.3, abs=0.01)
156
+
157
+ def test_capping(self):
158
+ prev = _make_entry(total_output=0, lines_added=0, lines_removed=0)
159
+ cur = _make_entry(total_output=10, lines_added=100, lines_removed=100)
160
+ ps = calculate_productivity(cur, prev)
161
+ # ratio = 200/10 = 20, normalized = min(1, 20/0.2) = 1.0
162
+ assert ps == pytest.approx(1.0, abs=0.01)
163
+
164
+ def test_consecutive_diffs(self):
165
+ """Verify PS uses consecutive entry diffs, not cumulative totals."""
166
+ prev = _make_entry(total_output=500, lines_added=50, lines_removed=10)
167
+ cur = _make_entry(total_output=600, lines_added=55, lines_removed=12)
168
+ ps = calculate_productivity(cur, prev)
169
+ # delta_lines = (55-50) + (12-10) = 7, delta_output = 100
170
+ # ratio = 7/100 = 0.07, normalized = 0.07/0.2 = 0.35
171
+ assert ps == pytest.approx(0.2 + 0.8 * 0.35, abs=0.01)
172
+
173
+
174
+ # --- Composite tests ---
175
+
176
+
177
+ class TestComposite:
178
+ def test_optimal_conditions(self):
179
+ prev = _make_entry(total_output=0, lines_added=0, lines_removed=0)
180
+ cur = _make_entry(
181
+ current_input=1000, cache_read=9000, total_output=100,
182
+ lines_added=25, lines_removed=5,
183
+ )
184
+ score = calculate_intelligence(cur, prev, 200000)
185
+ assert score.mi > 0.9
186
+
187
+ def test_worst_conditions(self):
188
+ prev = _make_entry(total_output=0, lines_added=0, lines_removed=0)
189
+ cur = _make_entry(
190
+ current_input=200000, total_output=10000,
191
+ lines_added=0, lines_removed=0,
192
+ )
193
+ score = calculate_intelligence(cur, prev, 200000)
194
+ assert score.mi < 0.2
195
+
196
+ def test_weight_sum(self):
197
+ assert MI_WEIGHT_CPS + MI_WEIGHT_ES + MI_WEIGHT_PS == pytest.approx(1.0)
198
+
199
+ def test_bounds(self):
200
+ """MI should always be in [0, 1]."""
201
+ prev = _make_entry(total_output=0)
202
+ for u in [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]:
203
+ used = int(u * 200000)
204
+ cur = _make_entry(current_input=used, total_output=1000, lines_added=50)
205
+ score = calculate_intelligence(cur, prev, 200000)
206
+ assert 0.0 <= score.mi <= 1.0, f"MI out of bounds at u={u}: {score.mi}"
207
+
208
+
209
+ # --- Color tests ---
210
+
211
+
212
+ class TestColor:
213
+ def test_green(self):
214
+ assert get_mi_color(0.8) == "green"
215
+
216
+ def test_yellow(self):
217
+ assert get_mi_color(0.5) == "yellow"
218
+
219
+ def test_red(self):
220
+ assert get_mi_color(0.2) == "red"
221
+
222
+ def test_boundary_green(self):
223
+ assert get_mi_color(MI_GREEN_THRESHOLD + 0.001) == "green"
224
+
225
+ def test_boundary_yellow_upper(self):
226
+ assert get_mi_color(MI_GREEN_THRESHOLD) == "yellow"
227
+
228
+ def test_boundary_yellow_lower(self):
229
+ assert get_mi_color(MI_YELLOW_THRESHOLD + 0.001) == "yellow"
230
+
231
+ def test_boundary_red(self):
232
+ assert get_mi_color(MI_YELLOW_THRESHOLD) == "red"
233
+
234
+
235
+ # --- Format tests ---
236
+
237
+
238
+ class TestFormat:
239
+ def test_two_decimals(self):
240
+ assert format_mi_score(0.82) == "0.82"
241
+
242
+ def test_zero(self):
243
+ assert format_mi_score(0.0) == "0.00"
244
+
245
+ def test_one(self):
246
+ assert format_mi_score(1.0) == "1.00"
247
+
248
+ def test_rounding(self):
249
+ assert format_mi_score(0.8249) == "0.82"
250
+ assert format_mi_score(0.8251) == "0.83"
251
+
252
+
253
+ # --- Shared test vectors ---
254
+
255
+
256
+ class TestSharedVectors:
257
+ """Test against shared vectors for cross-implementation parity."""
258
+
259
+ @pytest.fixture
260
+ def vectors(self):
261
+ with open(FIXTURES_DIR / "mi_test_vectors.json") as f:
262
+ return json.load(f)
263
+
264
+ def test_all_vectors(self, vectors):
265
+ for vec in vectors:
266
+ inp = vec["input"]
267
+ exp = vec["expected"]
268
+
269
+ # Build entries from vector input
270
+ current_input = inp["current_input"]
271
+ cache_creation = inp["cache_creation"]
272
+ cache_read = inp["cache_read"]
273
+ # current_used should equal current_input + cache_creation + cache_read
274
+ # but we trust the vector's current_used for the entry construction
275
+ cur = _make_entry(
276
+ current_input=current_input,
277
+ cache_creation=cache_creation,
278
+ cache_read=cache_read,
279
+ total_output=inp["cur_output"],
280
+ lines_added=inp["cur_lines_added"],
281
+ lines_removed=inp["cur_lines_removed"],
282
+ context_window_size=inp["context_window"],
283
+ )
284
+
285
+ has_prev = inp["prev_output"] is not None
286
+ if has_prev:
287
+ prev = _make_entry(
288
+ total_output=inp["prev_output"],
289
+ lines_added=inp["prev_lines_added"],
290
+ lines_removed=inp["prev_lines_removed"],
291
+ )
292
+ else:
293
+ prev = None
294
+
295
+ score = calculate_intelligence(
296
+ cur, prev, inp["context_window"], inp["beta"]
297
+ )
298
+
299
+ assert score.cps == pytest.approx(exp["cps"], abs=0.01), (
300
+ f"CPS mismatch for '{vec['description']}': "
301
+ f"got {score.cps}, expected {exp['cps']}"
302
+ )
303
+ assert score.es == pytest.approx(exp["es"], abs=0.01), (
304
+ f"ES mismatch for '{vec['description']}': "
305
+ f"got {score.es}, expected {exp['es']}"
306
+ )
307
+ assert score.ps == pytest.approx(exp["ps"], abs=0.01), (
308
+ f"PS mismatch for '{vec['description']}': "
309
+ f"got {score.ps}, expected {exp['ps']}"
310
+ )
311
+ assert score.mi == pytest.approx(exp["mi"], abs=0.01), (
312
+ f"MI mismatch for '{vec['description']}': "
313
+ f"got {score.mi}, expected {exp['mi']}"
314
+ )
@@ -106,13 +106,13 @@ class TestFitToWidth:
106
106
  def test_realistic_ansi_strings(self):
107
107
  base = "\033[2m[Claude]\033[0m \033[0;34mdir\033[0m"
108
108
  git = " | \033[0;35mmain\033[0m"
109
- ctx = " | \033[0;32m150.0k free (75.0%)\033[0m"
109
+ ctx = " | \033[0;32m150.0k (75.0%)\033[0m"
110
110
  session = " \033[2mtest-session-uuid-1234\033[0m"
111
111
 
112
- # base=[Claude] dir = 12, git= | main = 7, ctx= | 150.0k free (75.0%) = 22,
113
- # session= test-session-uuid-1234 = 23 => total = 64
112
+ # base=[Claude] dir = 12, git= | main = 7, ctx= | 150.0k (75.0%) = 17,
113
+ # session= test-session-uuid-1234 = 23 => total = 59
114
114
  result = fit_to_width([base, git, ctx, session], 80)
115
- assert visible_width(result) == 64
115
+ assert visible_width(result) == 59
116
116
 
117
117
  # With tight width, session should be dropped
118
118
  result = fit_to_width([base, git, ctx, session], 50)
@@ -71,7 +71,7 @@ class TestStatuslineScript:
71
71
  """Should show free tokens indicator."""
72
72
  output, code = run_script(sample_input)
73
73
  assert code == 0
74
- assert "free" in output
74
+ assert "%" in output
75
75
 
76
76
  def test_shows_ac_indicator(self, sample_input):
77
77
  """Should show autocompact indicator."""
@@ -121,19 +121,19 @@ class TestContextWindowColors:
121
121
  """Low usage (>50% free) should produce output with '%'."""
122
122
  output, code = run_script(low_usage_input)
123
123
  assert code == 0
124
- assert "free" in output
124
+ assert "%" in output
125
125
 
126
126
  def test_medium_usage_has_output(self, medium_usage_input):
127
127
  """Medium usage (25-50% free) should produce output with '%'."""
128
128
  output, code = run_script(medium_usage_input)
129
129
  assert code == 0
130
- assert "free" in output
130
+ assert "%" in output
131
131
 
132
132
  def test_high_usage_has_output(self, high_usage_input):
133
133
  """High usage (<25% free) should produce output with 'free'."""
134
134
  output, code = run_script(high_usage_input)
135
135
  assert code == 0
136
- assert "free" in output
136
+ assert "%" in output
137
137
 
138
138
 
139
139
  class TestFixtures: