fast-resume 1.12.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,57 @@
1
+ """Logging configuration for fast-resume."""
2
+
3
+ import logging
4
+ from pathlib import Path
5
+
6
+ from .config import CACHE_DIR, LOG_FILE
7
+
8
# Module-level logger dedicated to adapter parse errors. It is configured
# lazily by setup_logging(), so importing this module has no side effects.
parse_logger = logging.getLogger("fast_resume.parse_errors")
10
+
11
+
12
def setup_logging() -> None:
    """Set up logging with a file handler for parse errors.

    Logs are written to ~/.cache/fast-resume/parse-errors.log. Safe to call
    multiple times: the file handler is only attached once.
    """
    # Ensure cache directory exists
    CACHE_DIR.mkdir(parents=True, exist_ok=True)

    # Configure parse error logger
    parse_logger.setLevel(logging.WARNING)

    # Avoid duplicate handlers if called multiple times
    if not parse_logger.handlers:
        # Plain append-mode file handler. NOTE: logging.FileHandler does NOT
        # rotate (a previous comment claimed size-based rotation); switch to
        # logging.handlers.RotatingFileHandler if the log must be bounded.
        handler = logging.FileHandler(LOG_FILE, mode="a", encoding="utf-8")
        handler.setLevel(logging.WARNING)

        # Format: timestamp - level - message
        formatter = logging.Formatter(
            "%(asctime)s - %(levelname)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        handler.setFormatter(formatter)
        parse_logger.addHandler(handler)

    # Don't propagate to root logger (avoid console output)
    parse_logger.propagate = False
39
+
40
+
41
def log_parse_error(
    agent: str, file_path: str | Path, error_type: str, message: str
) -> None:
    """Log a parse error to the log file.

    Args:
        agent: Which adapter encountered the error (e.g., "claude", "codex")
        file_path: Path to the problematic file
        error_type: Exception type name (e.g., "JSONDecodeError")
        message: Human-readable error message
    """
    # Lazy %-style arguments keep formatting cost off the happy path when
    # the WARNING level is disabled.
    parse_logger.warning("[%s] %s in %s: %s", agent, error_type, file_path, message)
53
+
54
+
55
def get_log_file_path() -> Path:
    """Return the path to the log file (the file may not exist yet)."""
    return LOG_FILE
fast_resume/query.py ADDED
@@ -0,0 +1,264 @@
1
+ """Query parser for keyword-based search syntax.
2
+
3
+ Supports syntax like: `agent:claude,codex dir:my-project date:<1d api auth`
4
+
5
+ Keywords:
6
+ - agent: Filter by agent name (supports multiple: agent:claude,codex)
7
+ - dir: Filter by directory (substring match, supports multiple: dir:proj1,proj2)
8
+ - date: Filter by date (today, yesterday, <1h, >1d, etc.)
9
+
10
+ Negation:
11
+ - Use ! prefix on value: agent:!claude (exclude claude)
12
+ - Use - prefix on keyword: -agent:claude (exclude claude)
13
+ """
14
+
15
+ import re
16
+ from dataclasses import dataclass, field
17
+ from datetime import datetime, timedelta
18
+ from enum import Enum
19
+
20
+
21
class DateOp(Enum):
    """Date filter comparison operator."""

    EXACT = "exact"  # a specific calendar day: today, yesterday
    LESS_THAN = "<"  # newer than the cutoff, e.g. <1h (within the last hour)
    GREATER_THAN = ">"  # older than the cutoff, e.g. >1d (older than 1 day)
27
+
28
+
29
@dataclass
class DateFilter:
    """Parsed date filter extracted from a query string."""

    op: DateOp  # How to compare session timestamps against the cutoff
    value: str  # Original user-supplied value, kept for display
    cutoff: datetime  # The cutoff datetime for comparison (local, naive)
    negated: bool = False  # True if filter should exclude matches
37
+
38
+
39
@dataclass
class Filter:
    """A multi-valued filter with include and exclude lists.

    Mixed usage is supported: agent:claude,!codex means
    "match claude but not codex".
    """

    include: list[str] = field(default_factory=list)  # OR-matched values
    exclude: list[str] = field(default_factory=list)  # AND-excluded values

    @property
    def values(self) -> list[str]:
        """Every configured value, includes first (backward compat/display)."""
        return [*self.include, *self.exclude]

    @property
    def negated(self) -> bool:
        """True when the filter only excludes (backward compat)."""
        return bool(self.exclude) and not self.include

    def matches(self, value: str, substring: bool = False) -> bool:
        """Return True if *value* passes this filter.

        Args:
            value: Candidate value to test.
            substring: When True, a filter value matches if it is a
                case-insensitive substring of *value*; otherwise exact
                (case-sensitive) equality is used.

        Returns:
            True when *value* hits the include list (or no include list
            is set) and misses every exclude entry.
        """
        # An empty filter accepts everything.
        if not (self.include or self.exclude):
            return True

        def hit(candidate: str) -> bool:
            if substring:
                return candidate.lower() in value.lower()
            return candidate == value

        # Excludes win: any hit rejects the value outright.
        for excluded in self.exclude:
            if hit(excluded):
                return False

        # Exclude-only filters accept anything that survived the loop above;
        # otherwise at least one include entry must hit.
        return not self.include or any(hit(inc) for inc in self.include)
89
+
90
+
91
@dataclass
class ParsedQuery:
    """Result of parsing a search query."""

    text: str  # Free-text search terms left after stripping keyword pairs
    agent: Filter | None  # Extracted agent filter (None if absent)
    directory: Filter | None  # Extracted directory filter (None if absent)
    date: DateFilter | None  # Extracted date filter (None if absent)
99
+
100
+
101
# Pattern to match keyword:value pairs with optional - prefix for negation
# Handles: agent:value, -agent:value, dir:"value with spaces", date:<1h
# NOTE(review): not anchored to a word boundary, so a token like
# "foo-agent:bar" also matches its "-agent:bar" tail — confirm intended.
_KEYWORD_PATTERN = re.compile(
    r"(-?)"  # optional negation prefix
    r"(agent|dir|date):"  # keyword prefix
    r'(?:"([^"]+)"|(\S+))'  # quoted value or unquoted value
)

# Pattern to parse relative time like <1h, >2d, <30m.
# A bare "Nu" (no operator) is allowed; callers treat it as "<Nu".
_RELATIVE_TIME_PATTERN = re.compile(
    r"^([<>])?(\d+)(m|h|d|w|mo|y)$"  # operator, number, unit
    # "mo" still matches although "m" precedes it in the alternation:
    # the trailing $ forces the engine to backtrack onto "mo".
)

# Time unit multipliers (in seconds)
_TIME_UNITS = {
    "m": 60,  # minutes
    "h": 3600,  # hours
    "d": 86400,  # days
    "w": 604800,  # weeks
    "mo": 2592000,  # months (30 days)
    "y": 31536000,  # years (365 days)
}
123
+
124
+
125
def _parse_date_value(value: str, negated: bool = False) -> DateFilter | None:
    """Parse a date filter value into a DateFilter.

    Supports:
        - today / yesterday: sessions from that calendar day
        - week / month: sessions within the last 7 / 30 days
        - <Nu: sessions newer than N units (e.g., <1h, <2d)
        - >Nu: sessions older than N units (e.g., >1h, >2d)
        - Nu: same as <Nu (defaults to "within")

    Args:
        value: Raw date value, optionally prefixed with "!" to negate.
        negated: True when the keyword itself carried a "-" prefix.

    Returns:
        A DateFilter, or None when the value is not recognized.
    """
    now = datetime.now()

    # A "!" on the value itself also flips the filter to exclusion.
    if value.startswith("!"):
        negated = True
        value = value[1:]

    lowered = value.lower()

    # Named calendar days: cutoff at local midnight of that day.
    if lowered == "today":
        midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
        return DateFilter(op=DateOp.EXACT, value=value, cutoff=midnight, negated=negated)
    if lowered == "yesterday":
        midnight = (now - timedelta(days=1)).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        return DateFilter(op=DateOp.EXACT, value=value, cutoff=midnight, negated=negated)

    # Named spans, treated as "within the last N days".
    named_spans = {"week": 7, "month": 30}
    if lowered in named_spans:
        cutoff = now - timedelta(days=named_spans[lowered])
        return DateFilter(
            op=DateOp.LESS_THAN, value=value, cutoff=cutoff, negated=negated
        )

    # Relative forms like <1h, >2d, 30m; anything else is unrecognized.
    match = _RELATIVE_TIME_PATTERN.match(lowered)
    if match is None:
        return None

    op_str, amount, unit = match.groups()
    cutoff = now - timedelta(seconds=int(amount) * _TIME_UNITS[unit])
    # ">" means "older than"; "<" or no operator means "within".
    op = DateOp.GREATER_THAN if op_str == ">" else DateOp.LESS_THAN
    return DateFilter(op=op, value=value, cutoff=cutoff, negated=negated)
182
+
183
+
184
def _parse_filter_value(value: str, negated: bool) -> Filter:
    """Build a Filter from a keyword value, honoring "!" and commas.

    Args:
        value: Raw value such as "claude", "claude,codex" or "claude,!codex".
        negated: True when the whole keyword was prefixed with "-"
            (e.g. -agent:claude), which routes every value to exclude.

    Returns:
        Filter with values routed into include/exclude lists.
    """
    result = Filter()

    for raw in value.split(","):
        item = raw.strip()
        if not item:
            # Skip empties produced by stray commas like "a,,b".
            continue
        if item.startswith("!"):
            # Per-value negation: !codex excludes just this value.
            result.exclude.append(item[1:])
        elif negated:
            # Whole filter negated with the - keyword prefix.
            result.exclude.append(item)
        else:
            result.include.append(item)

    return result
211
+
212
+
213
def parse_query(query: str) -> ParsedQuery:
    """Extract keyword filters from a raw query string.

    Args:
        query: Raw query like "agent:claude,codex dir:my-project date:<1d api"

    Returns:
        ParsedQuery with extracted filters and the remaining free text.

    Examples:
        >>> parse_query("agent:claude api auth")
        ParsedQuery(text="api auth", agent=Filter(include=["claude"]), ...)

        >>> parse_query("agent:claude,codex api")  # Multiple values (OR)
        ParsedQuery(text="api", agent=Filter(include=["claude", "codex"]), ...)

        >>> parse_query("-agent:claude api")  # Negation (exclude all)
        ParsedQuery(text="api", agent=Filter(exclude=["claude"]), ...)

        >>> parse_query("agent:!claude api")  # Negation (exclude specific)
        ParsedQuery(text="api", agent=Filter(exclude=["claude"]), ...)

        >>> parse_query("agent:claude,!codex api")  # Mixed include/exclude
        ParsedQuery(text="api", agent=Filter(include=["claude"], exclude=["codex"]), ...)
    """
    agent: Filter | None = None
    directory: Filter | None = None
    date: DateFilter | None = None

    # Walk every keyword:value pair; a later occurrence of the same keyword
    # simply overwrites the earlier one (last one wins).
    for match in _KEYWORD_PATTERN.finditer(query):
        has_minus = match.group(1) == "-"
        keyword = match.group(2)
        # The value is either the quoted capture (group 3) or the bare
        # token capture (group 4) — exactly one of them is non-None.
        value = match.group(3) or match.group(4)

        if keyword == "agent":
            agent = _parse_filter_value(value, has_minus)
        elif keyword == "dir":
            directory = _parse_filter_value(value, has_minus)
        elif keyword == "date":
            date = _parse_date_value(value, has_minus)

    # Whatever survives stripping the keyword pairs is the free text;
    # collapse runs of whitespace left behind by the removals.
    remainder = _KEYWORD_PATTERN.sub("", query)
    text = " ".join(remainder.split())

    return ParsedQuery(text=text, agent=agent, directory=directory, date=date)
fast_resume/search.py ADDED
@@ -0,0 +1,281 @@
1
+ """Search engine for aggregating and searching sessions."""
2
+
3
+ from concurrent.futures import ThreadPoolExecutor, as_completed
4
+ from typing import Callable
5
+
6
+ from .adapters import (
7
+ ClaudeAdapter,
8
+ CodexAdapter,
9
+ CopilotAdapter,
10
+ CopilotVSCodeAdapter,
11
+ CrushAdapter,
12
+ ErrorCallback,
13
+ OpenCodeAdapter,
14
+ Session,
15
+ VibeAdapter,
16
+ )
17
+ from .index import TantivyIndex
18
+ from .query import Filter, parse_query
19
+
20
+
21
class SessionSearch:
    """Aggregates sessions from all adapters and provides search.

    Uses Tantivy as the single source of truth for session data.
    """

    def __init__(self) -> None:
        # One adapter per supported agent; each knows how to discover and
        # parse that agent's session files.
        self.adapters = [
            ClaudeAdapter(),
            CodexAdapter(),
            CopilotAdapter(),
            CopilotVSCodeAdapter(),
            CrushAdapter(),
            OpenCodeAdapter(),
            VibeAdapter(),
        ]
        # Cached full session list; None until the first load completes.
        self._sessions: list[Session] | None = None
        # Fast id -> Session lookup used by search() to resolve index hits.
        self._sessions_by_id: dict[str, Session] = {}
        # Set while get_sessions_streaming() runs so concurrent callers see
        # partial results instead of triggering a second full load.
        self._streaming_in_progress: bool = False
        self._index = TantivyIndex()

    def _load_from_index(self) -> list[Session] | None:
        """Try to load sessions from index if no changes detected (fast path for TUI).

        Returns the cached session list, or None when the index is empty or
        any adapter reports changes (callers must then do a full update).

        NOTE(review): not called anywhere within this class — presumably
        invoked by the TUI layer; confirm before removing.
        """
        # Get known sessions from Tantivy
        known = self._index.get_known_sessions()
        if not known:
            return None

        # Check if any adapter has changes
        for adapter in self.adapters:
            new_or_modified, deleted_ids = adapter.find_sessions_incremental(known)
            if new_or_modified or deleted_ids:
                # Changes detected - need full update
                return None

        # No changes - load from index
        sessions = self._index.get_all_sessions()
        if not sessions:
            return None

        # Populate sessions_by_id
        for session in sessions:
            self._sessions_by_id[session.id] = session

        return sessions

    def get_all_sessions(self, force_refresh: bool = False) -> list[Session]:
        """Get all sessions from all adapters with incremental updates.

        Args:
            force_refresh: When True, bypass the in-memory cache and treat
                every adapter session as new (the index is rebuilt from
                adapter output rather than compared incrementally).

        Returns:
            All sessions sorted by timestamp, newest first.
        """
        if self._sessions is not None and not force_refresh:
            return self._sessions

        # If streaming is in progress, return current partial results
        if self._streaming_in_progress:
            return self._sessions if self._sessions is not None else []

        # Get known sessions from Tantivy for incremental comparison;
        # an empty dict makes every adapter session look new/modified.
        known = self._index.get_known_sessions() if not force_refresh else {}

        # Ask each adapter for changes
        all_new_or_modified: list[Session] = []
        all_deleted_ids: list[str] = []

        def get_incremental(adapter):
            return adapter.find_sessions_incremental(known)

        # One worker per adapter: the work is I/O-bound file scanning.
        with ThreadPoolExecutor(max_workers=len(self.adapters)) as executor:
            results = executor.map(get_incremental, self.adapters)
            for new_or_modified, deleted_ids in results:
                all_new_or_modified.extend(new_or_modified)
                all_deleted_ids.extend(deleted_ids)

        # If no changes and we have data in index, load from index
        if not all_new_or_modified and not all_deleted_ids and known:
            self._sessions = self._index.get_all_sessions()
            for session in self._sessions:
                self._sessions_by_id[session.id] = session
            self._sessions.sort(key=lambda s: s.timestamp, reverse=True)
            return self._sessions

        # Apply deletions to index
        self._index.delete_sessions(all_deleted_ids)

        # Update modified sessions atomically (delete + add in single transaction)
        self._index.update_sessions(all_new_or_modified)

        # Load all sessions from index
        self._sessions = self._index.get_all_sessions()
        for session in self._sessions:
            self._sessions_by_id[session.id] = session

        # Sort by timestamp, newest first
        self._sessions.sort(key=lambda s: s.timestamp, reverse=True)

        return self._sessions

    def get_sessions_streaming(
        self,
        on_progress: Callable[[], None],
        on_error: ErrorCallback = None,
    ) -> tuple[list[Session], int, int, int]:
        """Load sessions with progress callback for each adapter that completes.

        Sessions are indexed incrementally as each adapter completes, allowing
        Tantivy search to work during streaming.

        Args:
            on_progress: Callback for progress updates
            on_error: Optional callback for parse errors

        Returns:
            Tuple of (sessions, new_count, updated_count, deleted_count)
        """
        # Get known sessions from Tantivy
        known = self._index.get_known_sessions()

        # Pre-populate _sessions_by_id with existing sessions from index
        # so search() can find them during streaming
        existing_sessions = self._index.get_all_sessions()
        for session in existing_sessions:
            self._sessions_by_id[session.id] = session

        # Mark streaming as in progress
        self._streaming_in_progress = True
        total_new = 0
        total_updated = 0
        total_deleted = 0

        def get_incremental(adapter):
            return adapter.find_sessions_incremental(known, on_error=on_error)

        # try/finally guarantees the streaming flag is cleared even if an
        # adapter future raises.
        try:
            with ThreadPoolExecutor(max_workers=len(self.adapters)) as executor:
                futures = {
                    executor.submit(get_incremental, a): a for a in self.adapters
                }
                for future in as_completed(futures):
                    new_or_modified, deleted_ids = future.result()

                    # Skip if no changes from this adapter
                    if not new_or_modified and not deleted_ids:
                        continue

                    # Handle deletions first
                    if deleted_ids:
                        self._index.delete_sessions(deleted_ids)
                        for sid in deleted_ids:
                            self._sessions_by_id.pop(sid, None)
                        total_deleted += len(deleted_ids)

                    # Index incrementally + update _sessions_by_id for search lookup
                    if new_or_modified:
                        # Update atomically (delete + add in single transaction)
                        self._index.update_sessions(new_or_modified)
                        for session in new_or_modified:
                            self._sessions_by_id[session.id] = session
                            # Count new vs updated: ids already known to the
                            # index are updates, everything else is new.
                            if session.id in known:
                                total_updated += 1
                            else:
                                total_new += 1

                    # Notify progress - TUI will query the index
                    on_progress()
        finally:
            self._streaming_in_progress = False

        # Load final state from index
        self._sessions = self._index.get_all_sessions()
        for session in self._sessions:
            self._sessions_by_id[session.id] = session
        self._sessions.sort(key=lambda s: s.timestamp, reverse=True)

        return self._sessions, total_new, total_updated, total_deleted

    def search(
        self,
        query: str,
        agent_filter: str | None = None,
        directory_filter: str | None = None,
        limit: int = 100,
    ) -> list[Session]:
        """Search sessions using Tantivy full-text search with fuzzy matching.

        Supports keyword syntax in the query:
        - agent:value,value2 - Filter by agent (comma for OR, ! or - for NOT)
        - dir:value - Filter by directory (substring match)
        - date:value - Filter by date (today, yesterday, <1h, >1d, etc.)

        Explicit filter parameters take precedence over keywords in the query.
        All filtering is done at the Tantivy level for efficiency.

        Args:
            query: Raw query string, possibly containing keyword filters.
            agent_filter: Explicit agent name; overrides any agent: keyword.
            directory_filter: Explicit directory; overrides any dir: keyword.
            limit: Maximum number of results to return.

        Returns:
            Matching sessions in index-score order.
        """
        # Parse keyword syntax from query
        parsed = parse_query(query)
        search_text = parsed.text

        # Merge filters: explicit params take precedence over parsed keywords
        # Convert string params to Filter objects for consistency
        if agent_filter is not None:
            effective_agent: Filter | None = Filter(include=[agent_filter])
        else:
            effective_agent = parsed.agent

        if directory_filter is not None:
            effective_dir: Filter | None = Filter(include=[directory_filter])
        else:
            effective_dir = parsed.directory

        date_filter = parsed.date

        # During streaming, _sessions_by_id is updated incrementally
        # Only call get_all_sessions() if not streaming and no sessions loaded yet
        if not self._streaming_in_progress and self._sessions is None:
            self.get_all_sessions()

        # Use Tantivy for all searching and filtering
        results = self._index.search(
            search_text,
            agent_filter=effective_agent,
            directory_filter=effective_dir,
            date_filter=date_filter,
            limit=limit,
        )

        # Lookup full session objects from results; ids missing from the
        # in-memory map (e.g. deleted mid-stream) are silently dropped.
        matched_sessions = []
        for session_id, _score in results:
            session = self._sessions_by_id.get(session_id)
            if session:
                matched_sessions.append(session)

        return matched_sessions

    def get_session_count(self, agent_filter: str | None = None) -> int:
        """Get the total number of sessions in the index.

        Args:
            agent_filter: If provided, only count sessions for this agent.
        """
        return self._index.get_session_count(agent_filter)

    def get_agents_with_sessions(self) -> set[str]:
        """Get the set of agent names that have at least one session."""
        agents = set()
        for adapter in self.adapters:
            if self._index.get_session_count(adapter.name) > 0:
                agents.add(adapter.name)
        return agents

    def get_adapter_for_session(self, session: Session):
        """Get the adapter for a session, or None if its agent is unknown."""
        for adapter in self.adapters:
            if adapter.name == session.agent:
                return adapter
        return None

    def get_resume_command(self, session: Session, yolo: bool = False) -> list[str]:
        """Get the resume command for a session.

        Returns an empty list when no adapter matches the session's agent.
        """
        adapter = self.get_adapter_for_session(session)
        if adapter:
            return adapter.get_resume_command(session, yolo=yolo)
        return []
@@ -0,0 +1,58 @@
1
+ """TUI package for fast-resume."""
2
+
3
+ from .. import __version__
4
+ from .app import FastResumeApp
5
+ from .search_input import KeywordSuggester
6
+ from .modal import YoloModeModal
7
+ from .preview import SessionPreview
8
+ from .utils import (
9
+ ASSETS_DIR,
10
+ _icon_cache,
11
+ copy_to_clipboard,
12
+ format_directory,
13
+ format_time_ago,
14
+ get_age_color,
15
+ get_agent_icon,
16
+ highlight_matches,
17
+ )
18
+
19
+
20
def run_tui(
    query: str = "",
    agent_filter: str | None = None,
    yolo: bool = False,
    no_version_check: bool = False,
) -> tuple[list[str] | None, str | None]:
    """Run the TUI and return the resume command and directory if selected.

    Args:
        query: Initial search query to pre-fill in the TUI.
        agent_filter: Restrict results to a single agent name.
        yolo: Start in yolo (auto-approve) mode.
        no_version_check: Skip the update-availability notice.

    Returns:
        Tuple of (resume command argv or None, working directory or None).
    """
    app = FastResumeApp(
        initial_query=query,
        agent_filter=agent_filter,
        yolo=yolo,
        no_version_check=no_version_check,
    )
    app.run()

    # NOTE(review): reads the app's private _available_update attribute;
    # consider exposing a public accessor on FastResumeApp.
    if not no_version_check and app._available_update:
        print(
            f"\nUpdate available: {__version__} → {app._available_update}\n"
            # Fixed: this segment was an f-string with no placeholders (F541).
            "Run: uv tool upgrade fast-resume"
        )

    return app.get_resume_command(), app.get_resume_directory()
42
+
43
+
44
# Public API of the TUI package.
# NOTE(review): "_icon_cache" is a leading-underscore (private) name; exporting
# it via __all__ is unusual — confirm external consumers actually rely on it.
__all__ = [
    "run_tui",
    "FastResumeApp",
    "KeywordSuggester",
    "YoloModeModal",
    "SessionPreview",
    "ASSETS_DIR",
    "copy_to_clipboard",
    "format_directory",
    "format_time_ago",
    "get_age_color",
    "get_agent_icon",
    "highlight_matches",
    "_icon_cache",
]