luckyd-code 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. luckyd_code/__init__.py +54 -0
  2. luckyd_code/__main__.py +5 -0
  3. luckyd_code/_agent_loop.py +551 -0
  4. luckyd_code/_data_dir.py +73 -0
  5. luckyd_code/agent.py +38 -0
  6. luckyd_code/analytics/__init__.py +18 -0
  7. luckyd_code/analytics/reporter.py +195 -0
  8. luckyd_code/analytics/scanner.py +443 -0
  9. luckyd_code/analytics/smells.py +316 -0
  10. luckyd_code/analytics/trends.py +303 -0
  11. luckyd_code/api.py +473 -0
  12. luckyd_code/audit_daemon.py +845 -0
  13. luckyd_code/autonomous_fixer.py +473 -0
  14. luckyd_code/background.py +159 -0
  15. luckyd_code/backup.py +237 -0
  16. luckyd_code/brain/__init__.py +84 -0
  17. luckyd_code/brain/assembler.py +100 -0
  18. luckyd_code/brain/chunker.py +345 -0
  19. luckyd_code/brain/constants.py +73 -0
  20. luckyd_code/brain/embedder.py +163 -0
  21. luckyd_code/brain/graph.py +311 -0
  22. luckyd_code/brain/indexer.py +316 -0
  23. luckyd_code/brain/parser.py +140 -0
  24. luckyd_code/brain/retriever.py +234 -0
  25. luckyd_code/cli.py +894 -0
  26. luckyd_code/cli_commands/__init__.py +1 -0
  27. luckyd_code/cli_commands/audit.py +120 -0
  28. luckyd_code/cli_commands/background.py +83 -0
  29. luckyd_code/cli_commands/brain.py +87 -0
  30. luckyd_code/cli_commands/config.py +75 -0
  31. luckyd_code/cli_commands/dispatcher.py +695 -0
  32. luckyd_code/cli_commands/sessions.py +41 -0
  33. luckyd_code/cli_entry.py +147 -0
  34. luckyd_code/cli_utils.py +112 -0
  35. luckyd_code/config.py +205 -0
  36. luckyd_code/context.py +214 -0
  37. luckyd_code/cost_tracker.py +209 -0
  38. luckyd_code/error_reporter.py +508 -0
  39. luckyd_code/exceptions.py +39 -0
  40. luckyd_code/export.py +126 -0
  41. luckyd_code/feedback_analyzer.py +290 -0
  42. luckyd_code/file_watcher.py +258 -0
  43. luckyd_code/git/__init__.py +11 -0
  44. luckyd_code/git/auto_commit.py +157 -0
  45. luckyd_code/git/tools.py +85 -0
  46. luckyd_code/hooks.py +236 -0
  47. luckyd_code/indexer.py +280 -0
  48. luckyd_code/init.py +39 -0
  49. luckyd_code/keybindings.py +77 -0
  50. luckyd_code/log.py +55 -0
  51. luckyd_code/mcp/__init__.py +6 -0
  52. luckyd_code/mcp/client.py +184 -0
  53. luckyd_code/memory/__init__.py +19 -0
  54. luckyd_code/memory/manager.py +339 -0
  55. luckyd_code/metrics/__init__.py +5 -0
  56. luckyd_code/model_registry.py +131 -0
  57. luckyd_code/orchestrator.py +204 -0
  58. luckyd_code/permissions/__init__.py +1 -0
  59. luckyd_code/permissions/manager.py +103 -0
  60. luckyd_code/planner.py +361 -0
  61. luckyd_code/plugins.py +91 -0
  62. luckyd_code/py.typed +0 -0
  63. luckyd_code/retry.py +57 -0
  64. luckyd_code/router.py +417 -0
  65. luckyd_code/sandbox.py +156 -0
  66. luckyd_code/self_critique.py +2 -0
  67. luckyd_code/self_improve.py +274 -0
  68. luckyd_code/sessions.py +114 -0
  69. luckyd_code/settings.py +72 -0
  70. luckyd_code/skills/__init__.py +8 -0
  71. luckyd_code/skills/review.py +22 -0
  72. luckyd_code/skills/security.py +17 -0
  73. luckyd_code/tasks/__init__.py +1 -0
  74. luckyd_code/tasks/manager.py +102 -0
  75. luckyd_code/templates/icon-192.png +0 -0
  76. luckyd_code/templates/icon-512.png +0 -0
  77. luckyd_code/templates/index.html +1965 -0
  78. luckyd_code/templates/manifest.json +14 -0
  79. luckyd_code/templates/src/app.js +694 -0
  80. luckyd_code/templates/src/body.html +767 -0
  81. luckyd_code/templates/src/cdn.txt +2 -0
  82. luckyd_code/templates/src/style.css +474 -0
  83. luckyd_code/templates/sw.js +31 -0
  84. luckyd_code/templates/test.html +6 -0
  85. luckyd_code/themes.py +48 -0
  86. luckyd_code/tools/__init__.py +97 -0
  87. luckyd_code/tools/agent_tools.py +65 -0
  88. luckyd_code/tools/bash.py +360 -0
  89. luckyd_code/tools/brain_tools.py +137 -0
  90. luckyd_code/tools/browser.py +369 -0
  91. luckyd_code/tools/datetime_tool.py +34 -0
  92. luckyd_code/tools/dockerfile_gen.py +212 -0
  93. luckyd_code/tools/file_ops.py +381 -0
  94. luckyd_code/tools/game_gen.py +360 -0
  95. luckyd_code/tools/git_tools.py +130 -0
  96. luckyd_code/tools/git_worktree.py +63 -0
  97. luckyd_code/tools/path_validate.py +64 -0
  98. luckyd_code/tools/project_gen.py +187 -0
  99. luckyd_code/tools/readme_gen.py +227 -0
  100. luckyd_code/tools/registry.py +157 -0
  101. luckyd_code/tools/shell_detect.py +109 -0
  102. luckyd_code/tools/web.py +89 -0
  103. luckyd_code/tools/youtube.py +187 -0
  104. luckyd_code/tools_bridge.py +144 -0
  105. luckyd_code/undo.py +126 -0
  106. luckyd_code/update.py +60 -0
  107. luckyd_code/verify.py +360 -0
  108. luckyd_code/web_app.py +176 -0
  109. luckyd_code/web_routes/__init__.py +23 -0
  110. luckyd_code/web_routes/background.py +73 -0
  111. luckyd_code/web_routes/brain.py +109 -0
  112. luckyd_code/web_routes/cost.py +12 -0
  113. luckyd_code/web_routes/files.py +133 -0
  114. luckyd_code/web_routes/memories.py +94 -0
  115. luckyd_code/web_routes/misc.py +67 -0
  116. luckyd_code/web_routes/project.py +48 -0
  117. luckyd_code/web_routes/review.py +20 -0
  118. luckyd_code/web_routes/sessions.py +44 -0
  119. luckyd_code/web_routes/settings.py +43 -0
  120. luckyd_code/web_routes/static.py +70 -0
  121. luckyd_code/web_routes/update.py +19 -0
  122. luckyd_code/web_routes/ws.py +237 -0
  123. luckyd_code-1.2.2.dist-info/METADATA +297 -0
  124. luckyd_code-1.2.2.dist-info/RECORD +127 -0
  125. luckyd_code-1.2.2.dist-info/WHEEL +4 -0
  126. luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
  127. luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
luckyd_code/context.py ADDED
@@ -0,0 +1,214 @@
1
+ import logging
2
+ from typing import List, Dict, Any, Optional
3
+
4
+ _log = logging.getLogger(__name__)
5
+
6
+
7
+ def _get_accurate_token_count(text: str) -> int:
8
+ """Count tokens using tiktoken if available, fall back to heuristic.
9
+
10
+ Note: tiktoken uses OpenAI's cl100k_base encoding, not DeepSeek's native
11
+ tokenizer. DeepSeek's vocabulary is similar but not identical, so counts
12
+ may be slightly off. We apply a 15% safety multiplier to avoid hitting
13
+ the context window before compaction triggers.
14
+ """
15
+ _DEEPSEEK_SAFETY_FACTOR = 1.15
16
+ try:
17
+ import tiktoken
18
+ enc = tiktoken.get_encoding("cl100k_base")
19
+ return int(len(enc.encode(text)) * _DEEPSEEK_SAFETY_FACTOR)
20
+ except Exception:
21
+ # Code is 2-3x more token-dense than prose due to symbols/indentation
22
+ if any(c in text for c in '{}(\n') or text.count(' ') > 2:
23
+ return max(len(text) // 3, 1)
24
+ return max(len(text) // 4, 1)
25
+
26
+
27
class ConversationContext:
    """Manages conversation history and message structure.

    Holds an OpenAI-style message list (system / user / assistant / tool),
    caps its length at ``max_messages``, and — when a config object is
    supplied — auto-compacts older turns into a model-written summary once
    the estimated token count crosses ``_token_compact_threshold``.
    """

    def __init__(self, system_prompt: str, max_messages: int = 100,
                 config=None, model: str = "deepseek-v4-flash"):
        """Seed the history with the system prompt.

        Args:
            system_prompt: Content of the leading ``role=system`` message.
            max_messages: Hard cap on history length (oldest trimmed first).
            config: Optional object with ``api_key``/``base_url`` attributes;
                enables token-threshold auto-compaction when not None.
            model: Model name the agent runs with (compaction itself always
                uses the Flash model — see :meth:`compact`).
        """
        self.messages: List[Dict[str, Any]] = [
            {"role": "system", "content": system_prompt}
        ]
        self.max_messages = max_messages
        self._config = config
        self._model = model
        # Auto-compact when estimated tokens exceed this threshold.
        # DeepSeek V4 Flash and V4 Pro both support 1M context windows.
        # We compact at 800K to leave ~200K headroom for the response and
        # any injected tool results. Users on older models with smaller
        # context windows can lower this via config.
        self._token_compact_threshold = 800_000

    def add_user_message(self, content: str):
        """Append a user message, then trim/compact if limits are exceeded."""
        self.messages.append({"role": "user", "content": content})
        self._maybe_trim()
        # Token-aware auto-compaction: prevent silent context window overflow
        if self._config is not None and self.estimate_tokens() > self._token_compact_threshold:
            self.compact(self._config, self._model, keep_last=8)

    def add_assistant_message(self, content: Optional[str] = None, tool_calls: Optional[List[Dict[str, Any]]] = None, reasoning_content: Optional[str] = None):
        """Append an assistant message with optional tool calls / reasoning."""
        msg: dict[str, Any] = {"role": "assistant"}
        if content is not None:
            msg["content"] = content
        elif reasoning_content:
            # DeepSeek API requires 'content' to always be present alongside
            # 'reasoning_content' in multi-turn requests. Omitting it causes
            # "content or tool_calls must be set". Default to empty string.
            msg["content"] = ""
        if tool_calls:
            msg["tool_calls"] = tool_calls
        if reasoning_content:
            msg["reasoning_content"] = reasoning_content
        self.messages.append(msg)
        self._maybe_trim()

    def add_tool_result(self, tool_call_id: str, tool_name: str, result: str):
        """Append a ``role=tool`` message answering a prior assistant tool call.

        Note: *tool_name* is accepted for interface compatibility but is not
        stored in the message.
        """
        self.messages.append({
            "role": "tool",
            "tool_call_id": tool_call_id,
            "content": result,
        })
        self._maybe_trim()

    def get_messages(self) -> List[Dict[str, Any]]:
        """Return the live message list (not a copy)."""
        return self.messages

    @staticmethod
    def _drop_orphaned_tool_messages(
        messages: List[Dict[str, Any]],
    ) -> List[Dict[str, Any]]:
        """Remove tool-result messages whose parent assistant tool_call is absent.

        This keeps the message list valid for the DeepSeek API, which requires
        every ``role=tool`` message to be preceded by an assistant message that
        contains a matching ``tool_call_id`` in its ``tool_calls`` list.
        """
        valid_parent_ids: set = set()
        filtered: List[Dict[str, Any]] = []
        for msg in messages:
            role = msg.get("role")
            if role == "assistant":
                for tc in msg.get("tool_calls") or []:
                    valid_parent_ids.add(tc.get("id", ""))
                filtered.append(msg)
            elif role == "tool":
                if msg.get("tool_call_id") in valid_parent_ids:
                    filtered.append(msg)
                # orphaned tool result — silently dropped
            else:
                # user / system messages reset parent-id tracking
                valid_parent_ids.clear()
                filtered.append(msg)
        return filtered

    def _maybe_trim(self):
        """Trim oldest messages if we exceed max_messages, keeping system prompt."""
        if len(self.messages) > self.max_messages:
            keep = [self.messages[0]] + self.messages[-(self.max_messages - 1):]
            self.messages = self._drop_orphaned_tool_messages(keep)

    def count_messages(self) -> int:
        """Number of messages currently held (including the system prompt)."""
        return len(self.messages)

    def estimate_tokens(self) -> int:
        """Estimate total tokens using tiktoken (accurate) with fallback to chars/4.

        Accounts for all message fields the API consumes: content, tool_calls,
        reasoning_content, and tool_call_ids.
        """
        total = 0
        for msg in self.messages:
            content = str(msg.get("content", ""))
            total += _get_accurate_token_count(content)
            # Account for tool_calls in assistant messages
            for tc in msg.get("tool_calls", []):
                fn = tc.get("function", {})
                total += _get_accurate_token_count(str(fn.get("name", "")))
                total += _get_accurate_token_count(str(fn.get("arguments", "")))
            # Account for tool_call_id in tool messages
            if msg.get("tool_call_id"):
                total += 5
            # DeepSeek reasoning_content (thinking blocks) — preserved across
            # multi-turn conversations and required by the API. These can be
            # surprisingly large (hundreds of tokens per assistant turn) and
            # were previously invisible to the compaction threshold.
            reasoning = msg.get("reasoning_content", "")
            if reasoning:
                total += _get_accurate_token_count(str(reasoning))
            # Role overhead (~5 tokens per message)
            total += 5
        return total

    def reset(self, system_prompt: Optional[str] = None):
        """Drop all history, keeping (or replacing) the system prompt.

        Note: a falsy *system_prompt* (e.g. ``""``) keeps the existing one.
        """
        if system_prompt:
            self.messages = [{"role": "system", "content": system_prompt}]
        else:
            system = self.messages[0]
            self.messages = [system]

    def compact(self, config, model: str, keep_last: int = 5,
                on_compact=None) -> str:
        """Compact conversation by summarizing older messages using the model.

        If *on_compact* is a callable, it is invoked with
        ``(summary_text, compacted_count)`` after a successful compaction.

        Returns a human-readable status string; on API failure the history
        is left untouched and ``"Compaction failed: ..."`` is returned.
        """
        if len(self.messages) <= keep_last + 1:
            return "Nothing to compact"

        system = self.messages[0]
        keep = self.messages[-keep_last:]
        compact_targets = self.messages[1:-keep_last]

        summary_text = "\n".join(
            f"{m['role']}: {str(m.get('content', ''))[:800]}"
            + (
                f"\n[key reasoning]: {str(m['reasoning_content'])[:200]}"
                if m.get("reasoning_content") else ""
            )
            for m in compact_targets
        )

        summary_prompt = (
            "Summarize the following conversation history concisely. "
            "Capture key decisions, code changes, file paths, and the user's goals:"
            f"\n\n{summary_text}"
        )

        try:
            from openai import OpenAI
            import httpx
            # Always use Flash for compaction — it's fast, cheap, and
            # summarisation doesn't benefit from the Pro model's extra power.
            compact_model = "deepseek-v4-flash"
            # FIX: close the transport when done. The previous code created a
            # bare httpx.Client and never closed it, leaking a connection pool
            # (sockets) on every compaction.
            with httpx.Client(timeout=30) as http_client:
                client = OpenAI(
                    api_key=config.api_key,
                    base_url=config.base_url,
                    http_client=http_client,
                )
                response = client.chat.completions.create(
                    model=compact_model,
                    messages=[{"role": "user", "content": summary_prompt}],
                    max_tokens=1500,
                )
            summary = response.choices[0].message.content or ""
        except Exception as e:
            return f"Compaction failed: {e}"

        raw = [system, {
            "role": "system",
            "content": f"[Compacted conversation summary]: {summary}",
        }] + keep
        self.messages = self._drop_orphaned_tool_messages(raw)

        count = len(compact_targets)
        if callable(on_compact):
            try:
                on_compact(summary, count)
            except Exception:
                _log.warning("Compaction callback failed", exc_info=True)

        return f"Compacted {count} messages into a summary"
@@ -0,0 +1,209 @@
1
+ """Track API usage costs per session with persistence."""
2
+
3
+ import json
4
+ import logging
5
+ from dataclasses import dataclass, field, fields
6
+ from datetime import datetime
7
+ from typing import Any, Optional
8
+
9
+ from ._data_dir import data_path, legacy_path
10
+
11
+ COST_FILE = data_path("costs.jsonl") # append-only, one JSON record per line
12
+ _LEGACY_COST_FILE = legacy_path("costs.json") # migrated on first write
13
+ _TOTALS_FILE = data_path("costs_total.json") # single-float running total
14
+
15
+ _logger = logging.getLogger("luckyd_code.cost_tracker")
16
+
17
+
18
@dataclass
class UsageRecord:
    """One API call's token usage and estimated dollar cost."""

    model: str
    input_tokens: int
    output_tokens: int
    timestamp: str = ""
    estimated_cost: float = 0.0
    # Internal flag — excluded from serialization so it never appears in
    # costs.jsonl. repr=False keeps it out of str() output too.
    _cost_provided: bool = field(default=False, repr=False)

    def __post_init__(self):
        """Stamp the record time and derive the cost unless one was supplied."""
        self.timestamp = self.timestamp or datetime.now().isoformat()
        has_tokens = self.input_tokens > 0 or self.output_tokens > 0
        # Respect an explicitly supplied cost (including a deliberate 0.0 for
        # cached / free responses); otherwise estimate it from token counts.
        if has_tokens and not self._cost_provided:
            self.estimated_cost = self._calc_cost()

    def to_dict(self) -> dict:
        """Return a plain dict of the public fields only (no underscore fields)."""
        public_names = (f.name for f in fields(self) if not f.name.startswith("_"))
        return {name: getattr(self, name) for name in public_names}

    def _calc_cost(self) -> float:
        """Estimate USD cost from approximate per-token rates.

        Rates (per 1K tokens, USD):
          deepseek-v4-flash : $0.000140 in / $0.000280 out
          deepseek-v4-pro   : $0.001740 in / $0.003480 out (75% discount until 2026-05-31)
          deepseek-chat     : legacy alias for deepseek-v4-flash
          deepseek-reasoner : legacy alias for deepseek-v4-flash (thinking mode)
        Prices sourced from api-docs.deepseek.com/quick_start/pricing (2026-04-26)
        """
        per_1k = {
            # Current V4 models
            "deepseek-v4-flash": (0.000140, 0.000280),
            "deepseek-v4-pro": (0.001740, 0.003480),
            # Legacy names — now route to deepseek-v4-flash
            "deepseek-chat": (0.000140, 0.000280),
            "deepseek-reasoner": (0.000140, 0.000280),
            # Older models (kept for historical cost records)
            "deepseek-v3-0324": (0.000270, 0.001100),
            "deepseek-v3": (0.000270, 0.001100),
        }
        # Unknown models fall back to v4-flash pricing.
        in_rate, out_rate = per_1k.get(self.model, (0.000140, 0.000280))
        in_cost = self.input_tokens / 1000 * in_rate
        out_cost = self.output_tokens / 1000 * out_rate
        return in_cost + out_cost
71
+
72
+
73
class CostTracker:
    """Records API usage costs per session with cumulative tracking.

    Session records live in memory; every new record is also appended to the
    ``costs.jsonl`` file, and a sidecar ``costs_total.json`` keeps an O(1)
    lifetime total.
    """

    def __init__(self):
        self.session_start = datetime.now()
        # Forward-reference annotation: avoids a hard import-order dependency
        # on UsageRecord at class-definition time.
        self.records: list["UsageRecord"] = []
        self._written_count: int = 0  # how many records already flushed to disk

    def record_usage(self, model: str, input_tokens: int, output_tokens: int,
                     cost: Optional[float] = None) -> "UsageRecord":
        """Create, store, and persist a usage record for one API call.

        When *cost* is given (even 0.0) it is kept verbatim; otherwise the
        record estimates its own cost from the token counts.
        """
        rec = UsageRecord(
            model=model,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            estimated_cost=cost if cost is not None else 0.0,
            _cost_provided=cost is not None,
        )
        self.records.append(rec)
        self._append_new_records()
        return rec

    def get_session_cost(self) -> float:
        """Total estimated cost (USD) of this session's records."""
        return sum(r.estimated_cost for r in self.records)

    def get_session_tokens(self) -> tuple[int, int]:
        """Return ``(input_tokens, output_tokens)`` totals for this session."""
        inp = sum(r.input_tokens for r in self.records)
        out = sum(r.output_tokens for r in self.records)
        return inp, out

    def get_cumulative_cost(self) -> float:
        """Return the lifetime total cost in O(1) using the sidecar totals file.

        Falls back to summing the full JSONL on first run (migration) and
        writes the total to the sidecar file for all future calls.
        """
        # Fast path: read the single-value sidecar
        if _TOTALS_FILE.exists():
            try:
                data = json.loads(_TOTALS_FILE.read_text(encoding="utf-8"))
                return float(data.get("total", 0.0))
            except Exception:
                pass
        # Slow path (first run / migration): sum the full JSONL, then persist
        total = sum(r.get("estimated_cost", 0) for r in self._load_all())
        self._write_total(total)
        return total

    @staticmethod
    def _write_total(total: float) -> None:
        """Persist the running total to the sidecar file (best-effort)."""
        try:
            _TOTALS_FILE.parent.mkdir(parents=True, exist_ok=True)
            _TOTALS_FILE.write_text(json.dumps({"total": total}), encoding="utf-8")
        except Exception:
            pass

    def get_stats(self) -> str:
        """Render a rich-markup summary of session and lifetime usage."""
        inp, out = self.get_session_tokens()
        cost = self.get_session_cost()
        total_cost = self.get_cumulative_cost()
        lines = [
            "[bold]Cost Tracking[/bold]",
            f"  Session tokens: {inp:,} in / {out:,} out",
            f"  Session cost: ${cost:.4f}",
            f"  Cumulative cost: ${total_cost:.4f}",
            f"  API calls this session: {len(self.records)}",
        ]
        return "\n".join(lines)

    def reset_cumulative(self) -> str:
        """Wipe the persistent costs.jsonl and reset session records."""
        self.records.clear()
        self._written_count = 0
        try:
            for f in (COST_FILE, _LEGACY_COST_FILE, _TOTALS_FILE):
                if f.exists():
                    f.unlink()
            return "Cumulative cost history cleared."
        except Exception as e:
            return f"Failed to clear cost file: {e}"

    def _append_new_records(self):
        """Append only new records to the JSONL file — O(1) per call."""
        new_records = self.records[self._written_count:]
        if not new_records:
            return
        COST_FILE.parent.mkdir(parents=True, exist_ok=True)
        try:
            self._migrate_legacy_json_once()
            new_cost = sum(r.estimated_cost for r in new_records)
            # FIX: snapshot the cumulative total BEFORE appending. If the
            # sidecar totals file is missing, get_cumulative_cost() falls back
            # to summing the full JSONL — reading it *after* the append would
            # include the new records and then add new_cost on top of that,
            # double-counting every record written in that call.
            current = self.get_cumulative_cost() if new_cost else 0.0
            with COST_FILE.open("a", encoding="utf-8") as fh:
                for r in new_records:
                    fh.write(json.dumps(r.to_dict()) + "\n")
            self._written_count = len(self.records)
            # Keep running total sidecar in sync — O(1) increment
            if new_cost:
                self._write_total(current + new_cost)
        except Exception:
            _logger.warning("Failed to persist cost records", exc_info=True)

    @staticmethod
    def _migrate_legacy_json_once():
        """One-time migration: convert costs.json → costs.jsonl."""
        if not _LEGACY_COST_FILE.exists() or COST_FILE.exists():
            return
        try:
            records = json.loads(_LEGACY_COST_FILE.read_text(encoding="utf-8"))
            with COST_FILE.open("w", encoding="utf-8") as fh:
                for r in records:
                    fh.write(json.dumps(r) + "\n")
            _LEGACY_COST_FILE.unlink()
        except Exception:
            _logger.warning("Failed to migrate legacy costs.json", exc_info=True)

    @staticmethod
    def _load_all() -> list[dict[str, Any]]:
        """Load all records from JSONL (plus legacy JSON if present)."""
        records: list[dict[str, Any]] = []
        # Legacy fallback — only present before first migration
        if _LEGACY_COST_FILE.exists() and not COST_FILE.exists():
            try:
                records = json.loads(_LEGACY_COST_FILE.read_text(encoding="utf-8"))
                return records
            except Exception:
                _logger.warning("Failed to load legacy cost records", exc_info=True)
                return []
        if COST_FILE.exists():
            try:
                with COST_FILE.open(encoding="utf-8") as fh:
                    for line in fh:
                        line = line.strip()
                        if line:
                            records.append(json.loads(line))
            except Exception:
                _logger.warning("Failed to load cost records", exc_info=True)
        return records