fast_resume-1.12.8-py3-none-any.whl
This diff represents the content of a publicly available package version as released to one of the supported registries, and is provided for informational purposes only.
- fast_resume/__init__.py +5 -0
- fast_resume/adapters/__init__.py +25 -0
- fast_resume/adapters/base.py +263 -0
- fast_resume/adapters/claude.py +209 -0
- fast_resume/adapters/codex.py +216 -0
- fast_resume/adapters/copilot.py +176 -0
- fast_resume/adapters/copilot_vscode.py +326 -0
- fast_resume/adapters/crush.py +341 -0
- fast_resume/adapters/opencode.py +333 -0
- fast_resume/adapters/vibe.py +188 -0
- fast_resume/assets/claude.png +0 -0
- fast_resume/assets/codex.png +0 -0
- fast_resume/assets/copilot-cli.png +0 -0
- fast_resume/assets/copilot-vscode.png +0 -0
- fast_resume/assets/crush.png +0 -0
- fast_resume/assets/opencode.png +0 -0
- fast_resume/assets/vibe.png +0 -0
- fast_resume/cli.py +327 -0
- fast_resume/config.py +30 -0
- fast_resume/index.py +758 -0
- fast_resume/logging_config.py +57 -0
- fast_resume/query.py +264 -0
- fast_resume/search.py +281 -0
- fast_resume/tui/__init__.py +58 -0
- fast_resume/tui/app.py +629 -0
- fast_resume/tui/filter_bar.py +128 -0
- fast_resume/tui/modal.py +73 -0
- fast_resume/tui/preview.py +396 -0
- fast_resume/tui/query.py +86 -0
- fast_resume/tui/results_table.py +178 -0
- fast_resume/tui/search_input.py +117 -0
- fast_resume/tui/styles.py +302 -0
- fast_resume/tui/utils.py +160 -0
- fast_resume-1.12.8.dist-info/METADATA +545 -0
- fast_resume-1.12.8.dist-info/RECORD +38 -0
- fast_resume-1.12.8.dist-info/WHEEL +4 -0
- fast_resume-1.12.8.dist-info/entry_points.txt +3 -0
- fast_resume-1.12.8.dist-info/licenses/LICENSE +21 -0
fast_resume/adapters/copilot_vscode.py
@@ -0,0 +1,326 @@
+"""VS Code Copilot (copilot-vscode) session adapter."""
+
+import orjson
+import sys
+from datetime import datetime
+from pathlib import Path
+from urllib.parse import unquote, urlparse
+
+from ..config import AGENTS
+from ..logging_config import log_parse_error
+from .base import ErrorCallback, ParseError, RawAdapterStats, Session, truncate_title
+
+# VS Code storage paths vary by platform
+if sys.platform == "darwin":
+    VSCODE_STORAGE = Path.home() / "Library" / "Application Support" / "Code"
+elif sys.platform == "win32":
+    VSCODE_STORAGE = Path.home() / "AppData" / "Roaming" / "Code"
+else:  # Linux
+    VSCODE_STORAGE = Path.home() / ".config" / "Code"
+
+CHAT_SESSIONS_DIR = (
+    VSCODE_STORAGE / "User" / "globalStorage" / "emptyWindowChatSessions"
+)
+WORKSPACE_STORAGE_DIR = VSCODE_STORAGE / "User" / "workspaceStorage"
+
+
+class CopilotVSCodeAdapter:
+    """Adapter for VS Code Copilot Chat sessions."""
+
+    name = "copilot-vscode"
+    color = AGENTS["copilot-vscode"]["color"]
+    badge = AGENTS["copilot-vscode"]["badge"]
+
+    def __init__(
+        self,
+        chat_sessions_dir: Path | None = None,
+        workspace_storage_dir: Path | None = None,
+    ) -> None:
+        self._chat_sessions_dir = (
+            chat_sessions_dir if chat_sessions_dir is not None else CHAT_SESSIONS_DIR
+        )
+        self._workspace_storage_dir = (
+            workspace_storage_dir
+            if workspace_storage_dir is not None
+            else WORKSPACE_STORAGE_DIR
+        )
+
+    def is_available(self) -> bool:
+        """Check if VS Code Copilot Chat data exists."""
+        # Check empty window sessions
+        if self._chat_sessions_dir.exists() and any(
+            self._chat_sessions_dir.glob("*.json")
+        ):
+            return True
+        # Check workspace sessions
+        if self._workspace_storage_dir.exists():
+            for ws_dir in self._workspace_storage_dir.iterdir():
+                chat_dir = ws_dir / "chatSessions"
+                if chat_dir.exists() and any(chat_dir.glob("*.json")):
+                    return True
+        return False
+
+    def _get_session_id_from_file(self, session_file: Path) -> str | None:
+        """Extract session ID from session file, returns None on error."""
+        try:
+            with open(session_file, "rb") as f:
+                data = orjson.loads(f.read())
+            return data.get("sessionId", session_file.stem)
+        except Exception:
+            return None
+
+    def _get_workspace_directory(self, workspace_dir: Path) -> str:
+        """Get the workspace folder path from workspace.json."""
+        workspace_json = workspace_dir / "workspace.json"
+        if workspace_json.exists():
+            try:
+                with open(workspace_json, "rb") as f:
+                    data = orjson.loads(f.read())
+                folder = data.get("folder", "")
+                if folder.startswith("file://"):
+                    # Parse and decode the file URI
+                    parsed = urlparse(folder)
+                    return unquote(parsed.path)
+            except Exception:
+                pass
+        return ""
+
+    def _get_all_session_files(self) -> list[tuple[Path, str]]:
+        """Get all session files with their associated workspace directories.
+
+        Returns list of (session_file_path, workspace_directory).
+        """
+        session_files: list[tuple[Path, str]] = []
+
+        # Empty window sessions (no workspace directory)
+        if self._chat_sessions_dir.exists():
+            for session_file in self._chat_sessions_dir.glob("*.json"):
+                session_files.append((session_file, ""))
+
+        # Workspace-specific sessions
+        if self._workspace_storage_dir.exists():
+            for ws_dir in self._workspace_storage_dir.iterdir():
+                if not ws_dir.is_dir():
+                    continue
+                chat_dir = ws_dir / "chatSessions"
+                if chat_dir.exists():
+                    ws_directory = self._get_workspace_directory(ws_dir)
+                    for session_file in chat_dir.glob("*.json"):
+                        session_files.append((session_file, ws_directory))
+
+        return session_files
+
+    def find_sessions(self) -> list[Session]:
+        """Find all VS Code Copilot Chat sessions."""
+        if not self.is_available():
+            return []
+
+        sessions = []
+        for session_file, ws_directory in self._get_all_session_files():
+            session = self._parse_session(session_file, ws_directory)
+            if session:
+                sessions.append(session)
+
+        return sessions
+
+    def _parse_session(
+        self,
+        session_file: Path,
+        workspace_directory: str = "",
+        on_error: ErrorCallback = None,
+    ) -> Session | None:
+        """Parse a VS Code Copilot Chat session file."""
+        try:
+            with open(session_file, "rb") as f:
+                data = orjson.loads(f.read())
+
+            session_id = data.get("sessionId", session_file.stem)
+            title = data.get("customTitle", "")
+            requests = data.get("requests", [])
+
+            if not requests:
+                return None
+
+            # Extract messages
+            messages: list[str] = []
+            directory = workspace_directory  # Use workspace directory as default
+            turn_count = 0
+
+            for req in requests:
+                # User message
+                msg = req.get("message", {})
+                user_text = msg.get("text", "")
+                if user_text:
+                    messages.append(f"» {user_text}")
+                    turn_count += 1
+
+                # Try to extract directory from content references if not already set
+                if not directory:
+                    for ref in req.get("contentReferences", []):
+                        ref_data = ref.get("reference", {})
+                        uri = ref_data.get("uri", {})
+                        fs_path = uri.get("fsPath", "")
+                        if fs_path:
+                            # Get parent directory
+                            directory = str(Path(fs_path).parent)
+                            break
+
+                # Assistant response
+                response = req.get("response", [])
+                has_response = False
+                for resp_part in response:
+                    if isinstance(resp_part, dict):
+                        value = resp_part.get("value", "")
+                        if value:
+                            messages.append(f" {value}")
+                            has_response = True
+                if has_response:
+                    turn_count += 1
+
+            if not messages:
+                return None
+
+            # Use first user message as title if no custom title
+            if not title and messages:
+                first_msg = messages[0].lstrip("» ").strip()
+                title = truncate_title(first_msg)
+
+            # Get timestamp from file or data
+            creation_date = data.get("creationDate")
+            last_message_date = data.get("lastMessageDate")
+            if last_message_date:
+                timestamp = datetime.fromtimestamp(last_message_date / 1000)
+            elif creation_date:
+                timestamp = datetime.fromtimestamp(creation_date / 1000)
+            else:
+                timestamp = datetime.fromtimestamp(session_file.stat().st_mtime)
+
+            full_content = "\n\n".join(messages)
+
+            return Session(
+                id=session_id,
+                agent=self.name,
+                title=title,
+                directory=directory,
+                timestamp=timestamp,
+                content=full_content,
+                message_count=turn_count,
+                mtime=session_file.stat().st_mtime,
+            )
+        except OSError as e:
+            error = ParseError(
+                agent=self.name,
+                file_path=str(session_file),
+                error_type="OSError",
+                message=str(e),
+            )
+            log_parse_error(
+                error.agent, error.file_path, error.error_type, error.message
+            )
+            if on_error:
+                on_error(error)
+            return None
+        except orjson.JSONDecodeError as e:
+            error = ParseError(
+                agent=self.name,
+                file_path=str(session_file),
+                error_type="JSONDecodeError",
+                message=str(e),
+            )
+            log_parse_error(
+                error.agent, error.file_path, error.error_type, error.message
+            )
+            if on_error:
+                on_error(error)
+            return None
+        except (KeyError, TypeError, AttributeError) as e:
+            error = ParseError(
+                agent=self.name,
+                file_path=str(session_file),
+                error_type=type(e).__name__,
+                message=str(e),
+            )
+            log_parse_error(
+                error.agent, error.file_path, error.error_type, error.message
+            )
+            if on_error:
+                on_error(error)
+            return None
+
+    def find_sessions_incremental(
+        self,
+        known: dict[str, tuple[float, str]],
+        on_error: ErrorCallback = None,
+    ) -> tuple[list[Session], list[str]]:
+        """Find sessions incrementally, comparing against known sessions."""
+        if not self.is_available():
+            deleted_ids = [
+                sid for sid, (_, agent) in known.items() if agent == self.name
+            ]
+            return [], deleted_ids
+
+        # Scan all session files and build current state
+        current_files: dict[str, tuple[Path, float, str]] = {}
+
+        for session_file, ws_directory in self._get_all_session_files():
+            session_id = self._get_session_id_from_file(session_file)
+            if session_id is None:
+                continue
+            mtime = session_file.stat().st_mtime
+            current_files[session_id] = (session_file, mtime, ws_directory)
+
+        # Find new and modified sessions
+        new_or_modified = []
+        for session_id, (path, mtime, ws_directory) in current_files.items():
+            known_entry = known.get(session_id)
+            if known_entry is None or mtime > known_entry[0] + 0.001:
+                session = self._parse_session(path, ws_directory, on_error=on_error)
+                if session:
+                    new_or_modified.append(session)
+
+        # Find deleted sessions
+        current_ids = set(current_files.keys())
+        deleted_ids = [
+            sid
+            for sid, (_, agent) in known.items()
+            if agent == self.name and sid not in current_ids
+        ]
+
+        return new_or_modified, deleted_ids
+
+    def get_resume_command(self, session: Session, yolo: bool = False) -> list[str]:
+        """Get command to open VS Code.
+
+        Note: VS Code Copilot Chat doesn't support resuming specific sessions
+        via command line. We open VS Code in the session's directory instead.
+        """
+        if session.directory:
+            return ["code", session.directory]
+        return ["code"]
+
+    def get_raw_stats(self) -> RawAdapterStats:
+        """Get raw statistics from VS Code Copilot data folders."""
+        if not self.is_available():
+            return RawAdapterStats(
+                agent=self.name,
+                data_dir=str(self._chat_sessions_dir),
+                available=False,
+                file_count=0,
+                total_bytes=0,
+            )
+
+        session_files = self._get_all_session_files()
+        total_bytes = 0
+        for path, _ in session_files:
+            try:
+                total_bytes += path.stat().st_size
+            except OSError:
+                pass
+
+        return RawAdapterStats(
+            agent=self.name,
+            data_dir=str(self._chat_sessions_dir),
+            available=True,
+            file_count=len(session_files),
+            total_bytes=total_bytes,
+        )
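
A minimal usage sketch for the adapter above, assuming the wheel is installed; the sorting and printing are illustrative, not part of the package:

    from fast_resume.adapters.copilot_vscode import CopilotVSCodeAdapter

    adapter = CopilotVSCodeAdapter()  # default platform-specific VS Code storage paths
    if adapter.is_available():
        # find_sessions() skips files with no requests or no extractable messages
        sessions = adapter.find_sessions()
        for s in sorted(sessions, key=lambda s: s.timestamp, reverse=True)[:5]:
            print(s.timestamp, s.title, s.directory)
        if sessions:
            # e.g. ["code", "/path/to/workspace"] - opening the directory is the
            # closest thing to "resume" that the adapter supports here
            print(adapter.get_resume_command(sessions[0]))
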
fast_resume/adapters/crush.py
@@ -0,0 +1,341 @@
+"""Crush (charmbracelet) session adapter."""
+
+import orjson
+import sqlite3
+from collections import defaultdict
+from datetime import datetime
+from pathlib import Path
+
+from ..config import AGENTS, CRUSH_PROJECTS_FILE
+from ..logging_config import log_parse_error
+from .base import ErrorCallback, ParseError, RawAdapterStats, Session, truncate_title
+
+
+class CrushAdapter:
+    """Adapter for Crush sessions."""
+
+    name = "crush"
+    color = AGENTS["crush"]["color"]
+    badge = AGENTS["crush"]["badge"]
+
+    def __init__(self, projects_file: Path | None = None) -> None:
+        self._projects_file = (
+            projects_file if projects_file is not None else CRUSH_PROJECTS_FILE
+        )
+
+    def is_available(self) -> bool:
+        """Check if Crush projects file exists."""
+        return self._projects_file.exists()
+
+    def find_sessions(self) -> list[Session]:
+        """Find all Crush sessions across all projects."""
+        if not self.is_available():
+            return []
+
+        sessions = []
+
+        try:
+            with open(self._projects_file, "rb") as f:
+                projects_data = orjson.loads(f.read())
+        except (orjson.JSONDecodeError, OSError):
+            return []
+
+        for project in projects_data.get("projects", []):
+            project_path = project.get("path", "")
+            data_dir = project.get("data_dir", "")
+
+            if not data_dir:
+                continue
+
+            db_path = Path(data_dir) / "crush.db"
+            if not db_path.exists():
+                continue
+
+            project_sessions = self._load_sessions_from_db(db_path, project_path)
+            sessions.extend(project_sessions)
+
+        return sessions
+
+    def _load_sessions_from_db(
+        self, db_path: Path, project_path: str, on_error: ErrorCallback = None
+    ) -> list[Session]:
+        """Load sessions from a Crush SQLite database."""
+        sessions = []
+
+        try:
+            conn = sqlite3.connect(str(db_path), timeout=5)
+            conn.row_factory = sqlite3.Row
+            cursor = conn.cursor()
+
+            cursor.execute("""
+                SELECT
+                    s.id, s.title, s.message_count, s.updated_at, s.created_at,
+                    m.role, m.parts, m.created_at as msg_created_at
+                FROM sessions s
+                LEFT JOIN messages m ON m.session_id = s.id
+                WHERE s.message_count > 0
+                ORDER BY s.updated_at DESC, m.created_at ASC
+            """)
+
+            # Group messages by session
+            session_data: dict[str, dict] = {}
+            session_messages: dict[str, list[tuple[str, str]]] = defaultdict(list)
+
+            for row in cursor.fetchall():
+                session_id = row["id"]
+
+                # Store session metadata (first occurrence)
+                if session_id not in session_data:
+                    session_data[session_id] = {
+                        "title": row["title"] or "",
+                        "updated_at": row["updated_at"],
+                        "created_at": row["created_at"],
+                    }
+
+                # Collect messages
+                if row["role"] is not None:
+                    session_messages[session_id].append((row["role"], row["parts"]))
+
+            conn.close()
+
+            # Build Session objects
+            for session_id, data in session_data.items():
+                session = self._build_session(
+                    session_id,
+                    data,
+                    session_messages[session_id],
+                    project_path,
+                    on_error=on_error,
+                )
+                if session:
+                    sessions.append(session)
+
+        except sqlite3.Error as e:
+            error = ParseError(
+                agent=self.name,
+                file_path=str(db_path),
+                error_type="sqlite3.Error",
+                message=str(e),
+            )
+            log_parse_error(
+                error.agent, error.file_path, error.error_type, error.message
+            )
+            if on_error:
+                on_error(error)
+
+        return sessions
+
+    def _build_session(
+        self,
+        session_id: str,
+        data: dict,
+        messages_raw: list[tuple[str, str]],
+        project_path: str,
+        on_error: ErrorCallback = None,
+    ) -> Session | None:
+        """Build a Session object from pre-fetched data."""
+        try:
+            title = data["title"]
+            updated_at = data["updated_at"]
+            created_at = data["created_at"]
+
+            # Detect if timestamp is in milliseconds (> year 3000 in seconds)
+            if updated_at > 100_000_000_000:
+                updated_at = updated_at / 1000
+            if created_at > 100_000_000_000:
+                created_at = created_at / 1000
+
+            timestamp = datetime.fromtimestamp(updated_at)
+
+            messages: list[str] = []
+            first_user_message = ""
+
+            for role, parts_json in messages_raw:
+                text_content = self._extract_text_from_parts(parts_json)
+                if not text_content:
+                    continue
+
+                role_prefix = "» " if role == "user" else " "
+                messages.append(f"{role_prefix}{text_content}")
+
+                if role == "user" and not first_user_message and len(text_content) > 5:
+                    first_user_message = text_content
+
+            # Skip sessions with no actual content
+            if not messages or not first_user_message:
+                return None
+
+            # Use first user message as title if none set
+            if not title:
+                title = truncate_title(first_user_message)
+
+            full_content = "\n\n".join(messages)
+
+            return Session(
+                id=session_id,
+                agent=self.name,
+                title=title,
+                directory=project_path,
+                timestamp=timestamp,
+                content=full_content,
+                message_count=len(messages),
+            )
+        except (KeyError, TypeError, AttributeError, ValueError) as e:
+            error = ParseError(
+                agent=self.name,
+                file_path=f"crush_db:{session_id}",
+                error_type=type(e).__name__,
+                message=str(e),
+            )
+            log_parse_error(
+                error.agent, error.file_path, error.error_type, error.message
+            )
+            if on_error:
+                on_error(error)
+            return None
+
+    def _extract_text_from_parts(self, parts_json: str) -> str:
+        """Extract text content from message parts JSON."""
+        try:
+            parts = orjson.loads(parts_json)
+        except orjson.JSONDecodeError:
+            return ""
+
+        text_parts = []
+        for part in parts:
+            if not isinstance(part, dict):
+                continue
+
+            part_type = part.get("type", "")
+            data = part.get("data", {})
+
+            if part_type == "text" and isinstance(data, dict):
+                text = data.get("text", "")
+                if text:
+                    text_parts.append(text)
+            elif part_type == "tool_result" and isinstance(data, dict):
+                # Include tool results for context
+                content = data.get("content", "")
+                if content and len(content) < 500:  # Skip long tool outputs
+                    text_parts.append(f"[{data.get('name', 'tool')}]: {content[:200]}")
+            elif part_type == "tool_call" and isinstance(data, dict):
+                # Include tool calls for context
+                name = data.get("name", "")
+                if name:
+                    text_parts.append(f"[calling {name}]")
+
+        return " ".join(text_parts)
+
+    def find_sessions_incremental(
+        self,
+        known: dict[str, tuple[float, str]],
+        on_error: ErrorCallback = None,
+    ) -> tuple[list[Session], list[str]]:
+        """Find sessions incrementally, comparing against known sessions."""
+        if not self.is_available():
+            deleted_ids = [
+                sid for sid, (_, agent) in known.items() if agent == self.name
+            ]
+            return [], deleted_ids
+
+        try:
+            with open(self._projects_file, "rb") as f:
+                projects_data = orjson.loads(f.read())
+        except (orjson.JSONDecodeError, OSError):
+            deleted_ids = [
+                sid for sid, (_, agent) in known.items() if agent == self.name
+            ]
+            return [], deleted_ids
+
+        # For Crush, we track db file mtimes and session IDs within
+        # When a db changes, we reload all sessions from it and diff
+        new_or_modified = []
+        all_current_ids: set[str] = set()
+
+        for project in projects_data.get("projects", []):
+            project_path = project.get("path", "")
+            data_dir = project.get("data_dir", "")
+
+            if not data_dir:
+                continue
+
+            db_path = Path(data_dir) / "crush.db"
+            if not db_path.exists():
+                continue
+
+            # Load all sessions from this db
+            project_sessions = self._load_sessions_from_db(
+                db_path, project_path, on_error=on_error
+            )
+
+            for session in project_sessions:
+                all_current_ids.add(session.id)
+                known_entry = known.get(session.id)
+                # Use session timestamp for comparison since db doesn't have file mtime
+                # Use 1ms tolerance for comparison due to datetime precision loss
+                session_mtime = session.timestamp.timestamp()
+                if known_entry is None or session_mtime > known_entry[0] + 0.001:
+                    session.mtime = session_mtime
+                    new_or_modified.append(session)
+
+        # Find deleted sessions
+        deleted_ids = [
+            sid
+            for sid, (_, agent) in known.items()
+            if agent == self.name and sid not in all_current_ids
+        ]
+
+        return new_or_modified, deleted_ids
+
+    def get_resume_command(self, session: Session, yolo: bool = False) -> list[str]:
+        """Get command to resume a Crush session."""
+        # Crush is interactive - it shows a session picker when launched in a project directory
+        # fast-resume changes to session.directory before executing this command
+        return ["crush"]
+
+    def get_raw_stats(self) -> RawAdapterStats:
+        """Get raw statistics from Crush database files."""
+        if not self.is_available():
+            return RawAdapterStats(
+                agent=self.name,
+                data_dir=str(self._projects_file.parent),
+                available=False,
+                file_count=0,
+                total_bytes=0,
+            )
+
+        try:
+            with open(self._projects_file, "rb") as f:
+                projects_data = orjson.loads(f.read())
+        except (orjson.JSONDecodeError, OSError):
+            return RawAdapterStats(
+                agent=self.name,
+                data_dir=str(self._projects_file.parent),
+                available=True,
+                file_count=0,
+                total_bytes=0,
+            )
+
+        file_count = 0
+        total_bytes = 0
+
+        for project in projects_data.get("projects", []):
+            data_dir = project.get("data_dir", "")
+            if not data_dir:
+                continue
+
+            db_path = Path(data_dir) / "crush.db"
+            if db_path.exists():
+                try:
+                    file_count += 1
+                    total_bytes += db_path.stat().st_size
+                except OSError:
+                    pass
+
+        return RawAdapterStats(
+            agent=self.name,
+            data_dir=str(self._projects_file.parent),
+            available=True,
+            file_count=file_count,
+            total_bytes=total_bytes,
+        )