onetool_mcp-1.0.0b1-py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects changes between package versions.
Files changed (132)
  1. bench/__init__.py +5 -0
  2. bench/cli.py +69 -0
  3. bench/harness/__init__.py +66 -0
  4. bench/harness/client.py +692 -0
  5. bench/harness/config.py +397 -0
  6. bench/harness/csv_writer.py +109 -0
  7. bench/harness/evaluate.py +512 -0
  8. bench/harness/metrics.py +283 -0
  9. bench/harness/runner.py +899 -0
  10. bench/py.typed +0 -0
  11. bench/reporter.py +629 -0
  12. bench/run.py +487 -0
  13. bench/secrets.py +101 -0
  14. bench/utils.py +16 -0
  15. onetool/__init__.py +4 -0
  16. onetool/cli.py +391 -0
  17. onetool/py.typed +0 -0
  18. onetool_mcp-1.0.0b1.dist-info/METADATA +163 -0
  19. onetool_mcp-1.0.0b1.dist-info/RECORD +132 -0
  20. onetool_mcp-1.0.0b1.dist-info/WHEEL +4 -0
  21. onetool_mcp-1.0.0b1.dist-info/entry_points.txt +3 -0
  22. onetool_mcp-1.0.0b1.dist-info/licenses/LICENSE.txt +687 -0
  23. onetool_mcp-1.0.0b1.dist-info/licenses/NOTICE.txt +64 -0
  24. ot/__init__.py +37 -0
  25. ot/__main__.py +6 -0
  26. ot/_cli.py +107 -0
  27. ot/_tui.py +53 -0
  28. ot/config/__init__.py +46 -0
  29. ot/config/defaults/bench.yaml +4 -0
  30. ot/config/defaults/diagram-templates/api-flow.mmd +33 -0
  31. ot/config/defaults/diagram-templates/c4-context.puml +30 -0
  32. ot/config/defaults/diagram-templates/class-diagram.mmd +87 -0
  33. ot/config/defaults/diagram-templates/feature-mindmap.mmd +70 -0
  34. ot/config/defaults/diagram-templates/microservices.d2 +81 -0
  35. ot/config/defaults/diagram-templates/project-gantt.mmd +37 -0
  36. ot/config/defaults/diagram-templates/state-machine.mmd +42 -0
  37. ot/config/defaults/onetool.yaml +25 -0
  38. ot/config/defaults/prompts.yaml +97 -0
  39. ot/config/defaults/servers.yaml +7 -0
  40. ot/config/defaults/snippets.yaml +4 -0
  41. ot/config/defaults/tool_templates/__init__.py +7 -0
  42. ot/config/defaults/tool_templates/extension.py +52 -0
  43. ot/config/defaults/tool_templates/isolated.py +61 -0
  44. ot/config/dynamic.py +121 -0
  45. ot/config/global_templates/__init__.py +2 -0
  46. ot/config/global_templates/bench-secrets-template.yaml +6 -0
  47. ot/config/global_templates/bench.yaml +9 -0
  48. ot/config/global_templates/onetool.yaml +27 -0
  49. ot/config/global_templates/secrets-template.yaml +44 -0
  50. ot/config/global_templates/servers.yaml +18 -0
  51. ot/config/global_templates/snippets.yaml +235 -0
  52. ot/config/loader.py +1087 -0
  53. ot/config/mcp.py +145 -0
  54. ot/config/secrets.py +190 -0
  55. ot/config/tool_config.py +125 -0
  56. ot/decorators.py +116 -0
  57. ot/executor/__init__.py +35 -0
  58. ot/executor/base.py +16 -0
  59. ot/executor/fence_processor.py +83 -0
  60. ot/executor/linter.py +142 -0
  61. ot/executor/pack_proxy.py +260 -0
  62. ot/executor/param_resolver.py +140 -0
  63. ot/executor/pep723.py +288 -0
  64. ot/executor/result_store.py +369 -0
  65. ot/executor/runner.py +496 -0
  66. ot/executor/simple.py +163 -0
  67. ot/executor/tool_loader.py +396 -0
  68. ot/executor/validator.py +398 -0
  69. ot/executor/worker_pool.py +388 -0
  70. ot/executor/worker_proxy.py +189 -0
  71. ot/http_client.py +145 -0
  72. ot/logging/__init__.py +37 -0
  73. ot/logging/config.py +315 -0
  74. ot/logging/entry.py +213 -0
  75. ot/logging/format.py +188 -0
  76. ot/logging/span.py +349 -0
  77. ot/meta.py +1555 -0
  78. ot/paths.py +453 -0
  79. ot/prompts.py +218 -0
  80. ot/proxy/__init__.py +21 -0
  81. ot/proxy/manager.py +396 -0
  82. ot/py.typed +0 -0
  83. ot/registry/__init__.py +189 -0
  84. ot/registry/models.py +57 -0
  85. ot/registry/parser.py +269 -0
  86. ot/registry/registry.py +413 -0
  87. ot/server.py +315 -0
  88. ot/shortcuts/__init__.py +15 -0
  89. ot/shortcuts/aliases.py +87 -0
  90. ot/shortcuts/snippets.py +258 -0
  91. ot/stats/__init__.py +35 -0
  92. ot/stats/html.py +250 -0
  93. ot/stats/jsonl_writer.py +283 -0
  94. ot/stats/reader.py +354 -0
  95. ot/stats/timing.py +57 -0
  96. ot/support.py +63 -0
  97. ot/tools.py +114 -0
  98. ot/utils/__init__.py +81 -0
  99. ot/utils/batch.py +161 -0
  100. ot/utils/cache.py +120 -0
  101. ot/utils/deps.py +403 -0
  102. ot/utils/exceptions.py +23 -0
  103. ot/utils/factory.py +179 -0
  104. ot/utils/format.py +65 -0
  105. ot/utils/http.py +202 -0
  106. ot/utils/platform.py +45 -0
  107. ot/utils/sanitize.py +130 -0
  108. ot/utils/truncate.py +69 -0
  109. ot_tools/__init__.py +4 -0
  110. ot_tools/_convert/__init__.py +12 -0
  111. ot_tools/_convert/excel.py +279 -0
  112. ot_tools/_convert/pdf.py +254 -0
  113. ot_tools/_convert/powerpoint.py +268 -0
  114. ot_tools/_convert/utils.py +358 -0
  115. ot_tools/_convert/word.py +283 -0
  116. ot_tools/brave_search.py +604 -0
  117. ot_tools/code_search.py +736 -0
  118. ot_tools/context7.py +495 -0
  119. ot_tools/convert.py +614 -0
  120. ot_tools/db.py +415 -0
  121. ot_tools/diagram.py +1604 -0
  122. ot_tools/diagram.yaml +167 -0
  123. ot_tools/excel.py +1372 -0
  124. ot_tools/file.py +1348 -0
  125. ot_tools/firecrawl.py +732 -0
  126. ot_tools/grounding_search.py +646 -0
  127. ot_tools/package.py +604 -0
  128. ot_tools/py.typed +0 -0
  129. ot_tools/ripgrep.py +544 -0
  130. ot_tools/scaffold.py +471 -0
  131. ot_tools/transform.py +213 -0
  132. ot_tools/web_fetch.py +384 -0
ot/executor/pep723.py ADDED
@@ -0,0 +1,288 @@
"""PEP 723 inline script metadata detection and parsing.

PEP 723 defines inline script metadata for Python scripts, allowing them
to declare dependencies and Python version requirements.

Example:
    # /// script
    # requires-python = ">=3.11"
    # dependencies = [
    #     "httpx>=0.27.0",
    #     "trafilatura>=2.0.0",
    # ]
    # ///

This module detects such headers and extracts tool functions for worker routing.
"""

from __future__ import annotations

import ast
import re
import tomllib
from dataclasses import dataclass, field
from pathlib import Path

# Regex to match PEP 723 script block
# Matches: # /// script ... # ///
PEP723_PATTERN = re.compile(
    r"^# /// script\s*$"
    r"(.*?)"
    r"^# ///$",
    re.MULTILINE | re.DOTALL,
)


@dataclass
class ScriptMetadata:
    """Parsed PEP 723 script metadata."""

    requires_python: str | None = None
    dependencies: list[str] = field(default_factory=list)
    raw_content: str = ""

    @property
    def has_dependencies(self) -> bool:
        """Check if script declares any dependencies."""
        return bool(self.dependencies)


@dataclass
class ToolFileInfo:
    """Information about a tool file.

    Attributes:
        path: Path to the tool file.
        pack: Pack name (e.g., "brave" for brave.search).
        functions: List of public function names.
        is_worker: True if tool uses worker subprocess (PEP 723 with deps).
        is_internal: True if tool is bundled with OneTool (from ot_tools package).
        metadata: Parsed PEP 723 metadata if present.
        config_class_source: Source code of Config class if present.
    """

    path: Path
    pack: str | None = None
    functions: list[str] = field(default_factory=list)
    is_worker: bool = False
    is_internal: bool = False
    metadata: ScriptMetadata | None = None
    config_class_source: str | None = None


def parse_pep723_metadata(content: str) -> ScriptMetadata | None:
    """Parse PEP 723 inline script metadata from file content.

    Args:
        content: File content to parse

    Returns:
        ScriptMetadata if found, None otherwise
    """
    match = PEP723_PATTERN.search(content)
    if not match:
        return None

    raw_content = match.group(1).strip()

    # Strip "# " prefix from each line to get valid TOML
    toml_lines = [
        line[2:] if line.startswith("# ") else line.lstrip("#")
        for line in raw_content.split("\n")
    ]
    toml_content = "\n".join(toml_lines)

    try:
        data = tomllib.loads(toml_content)
    except tomllib.TOMLDecodeError:
        return None

    return ScriptMetadata(
        requires_python=data.get("requires-python"),
        dependencies=data.get("dependencies", []),
        raw_content=raw_content,
    )


def has_pep723_header(path: Path) -> bool:
    """Check if a file has a PEP 723 script header.

    Args:
        path: Path to Python file

    Returns:
        True if file has PEP 723 header
    """
    try:
        content = path.read_text()
        return PEP723_PATTERN.search(content) is not None
    except OSError:
        return False


def _extract_functions_from_ast(tree: ast.Module) -> list[str]:
    """Extract public function names from a parsed AST.

    Args:
        tree: Parsed AST module

    Returns:
        List of public function names
    """
    functions: list[str] = []

    # Check for __all__ definition
    all_names: list[str] | None = None
    for node in ast.walk(tree):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if (
                    isinstance(target, ast.Name)
                    and target.id == "__all__"
                    and isinstance(node.value, ast.List)
                ):
                    all_names = []
                    for elt in node.value.elts:
                        if isinstance(elt, ast.Constant) and isinstance(elt.value, str):
                            all_names.append(elt.value)

    # Extract function definitions
    for node in tree.body:
        if isinstance(node, ast.FunctionDef):
            name = node.name
            # Skip private functions
            if name.startswith("_"):
                continue
            # If __all__ is defined, only include those
            if all_names is not None and name not in all_names:
                continue
            functions.append(name)

    return functions


def _extract_pack_from_ast(tree: ast.Module) -> str | None:
    """Extract the pack declaration from a parsed AST.

    Looks for: pack = "name" at the top of the file.

    Args:
        tree: Parsed AST module

    Returns:
        Pack string, or None if not declared
    """
    for node in tree.body:
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if (
                    isinstance(target, ast.Name)
                    and target.id == "pack"
                    and isinstance(node.value, ast.Constant)
                    and isinstance(node.value.value, str)
                ):
                    return node.value.value
    return None


def _extract_config_from_ast(tree: ast.Module, content: str) -> str | None:
    """Extract the Config class source from a parsed AST.

    Looks for: class Config(BaseModel): in the module body.
    The class must inherit from BaseModel (pydantic).

    Args:
        tree: Parsed AST module
        content: Original file content (needed for source extraction)

    Returns:
        Config class source code as string, or None if not found
    """
    for node in tree.body:
        if isinstance(node, ast.ClassDef) and node.name == "Config":
            # Verify it inherits from BaseModel
            for base in node.bases:
                base_name = None
                if isinstance(base, ast.Name):
                    base_name = base.id
                elif isinstance(base, ast.Attribute):
                    base_name = base.attr

                if base_name == "BaseModel":
                    # Extract source code using line numbers
                    lines = content.split("\n")
                    start_line = node.lineno - 1  # 0-indexed
                    end_line = node.end_lineno or node.lineno
                    config_source = "\n".join(lines[start_line:end_line])
                    return config_source

    return None


def analyze_tool_file(path: Path) -> ToolFileInfo:
    """Analyze a tool file for metadata, pack, functions, and config.

    Reads the file once and extracts all information in a single pass.

    Args:
        path: Path to Python file

    Returns:
        ToolFileInfo with all extracted information
    """
    info = ToolFileInfo(path=path)

    try:
        content = path.read_text()
    except OSError:
        return info

    # Check for PEP 723 metadata
    info.metadata = parse_pep723_metadata(content)
    info.is_worker = info.metadata is not None and info.metadata.has_dependencies

    # Parse AST once for all extractions
    try:
        tree = ast.parse(content)
    except SyntaxError:
        return info

    # Extract pack, functions, and config class from pre-parsed AST
    info.pack = _extract_pack_from_ast(tree)
    info.functions = _extract_functions_from_ast(tree)
    info.config_class_source = _extract_config_from_ast(tree, content)

    return info


def categorize_tools(
    tool_files: list[Path],
    internal_paths: set[Path] | None = None,
) -> tuple[list[ToolFileInfo], list[ToolFileInfo]]:
    """Categorize tool files into extension tools and internal tools.

    Internal tools (bundled with OneTool) run in-process.
    Extension tools (user-created with PEP 723) run in worker subprocesses.

    Args:
        tool_files: List of tool file paths.
        internal_paths: Set of paths that are internal tools (from ot_tools package).
            If provided, tools in this set are marked as is_internal=True.

    Returns:
        Tuple of (worker_tools, inprocess_tools)
    """
    worker_tools: list[ToolFileInfo] = []
    inprocess_tools: list[ToolFileInfo] = []
    internal_paths = internal_paths or set()

    for path in tool_files:
        info = analyze_tool_file(path)
        # Mark internal tools (bundled with OneTool)
        info.is_internal = path.resolve() in internal_paths
        if info.is_worker:
            worker_tools.append(info)
        else:
            inprocess_tools.append(info)

    return worker_tools, inprocess_tools
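The module above is self-contained enough to exercise directly. The following is a minimal usage sketch, not part of the package: the tool path, its contents, and the surrounding script are hypothetical, and it only assumes the public functions defined in ot/executor/pep723.py.

# Hypothetical usage sketch for ot/executor/pep723.py (paths are illustrative).
from pathlib import Path

from ot.executor.pep723 import analyze_tool_file, categorize_tools, has_pep723_header

tool = Path("my_tools/fetch.py")  # assumed user extension tool with a PEP 723 header

if has_pep723_header(tool):
    info = analyze_tool_file(tool)
    print(info.pack, info.functions, info.is_worker)
    if info.metadata is not None:
        print("declared dependencies:", info.metadata.dependencies)

# Split discovered tool files into worker-subprocess tools (PEP 723 with deps)
# and in-process tools.
worker_tools, inprocess_tools = categorize_tools([tool])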
ot/executor/result_store.py ADDED
@@ -0,0 +1,369 @@
"""Large output result store for OneTool.

Stores tool outputs exceeding max_inline_size to disk and provides
a query API for paginated retrieval.

Storage:
    .onetool/tmp/
    ├── result-{guid}.meta.json   # Metadata
    └── result-{guid}.txt         # Content
"""

from __future__ import annotations

import difflib
import json
import re
import uuid
from dataclasses import dataclass, field
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

from ot.config import get_config


@dataclass
class ResultMeta:
    """Metadata for a stored result."""

    handle: str
    total_lines: int
    size_bytes: int
    created_at: str
    tool: str = ""

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "handle": self.handle,
            "total_lines": self.total_lines,
            "size_bytes": self.size_bytes,
            "created_at": self.created_at,
            "tool": self.tool,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> ResultMeta:
        """Create from dictionary."""
        return cls(
            handle=data["handle"],
            total_lines=data["total_lines"],
            size_bytes=data["size_bytes"],
            created_at=data["created_at"],
            tool=data.get("tool", ""),
        )


@dataclass
class StoredResult:
    """Result from storing large output."""

    handle: str
    total_lines: int
    size_bytes: int
    summary: str
    preview: list[str]
    query: str

    def to_dict(self) -> dict[str, Any]:
        """Convert to summary dictionary for MCP response."""
        return {
            "handle": self.handle,
            "total_lines": self.total_lines,
            "size_bytes": self.size_bytes,
            "summary": self.summary,
            "preview": self.preview,
            "query": self.query,
        }


@dataclass
class QueryResult:
    """Result from querying stored output."""

    lines: list[str]
    total_lines: int
    returned: int
    offset: int
    has_more: bool

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for MCP response."""
        return {
            "lines": self.lines,
            "total_lines": self.total_lines,
            "returned": self.returned,
            "offset": self.offset,
            "has_more": self.has_more,
        }


@dataclass
class ResultStore:
    """Manages storage and retrieval of large tool outputs."""

    store_dir: Path = field(default_factory=lambda: _get_default_store_dir())
    _store_count: int = field(default=0, repr=False)

    # Run cleanup every N store calls (probabilistic cleanup)
    _CLEANUP_INTERVAL: int = 10

    def __post_init__(self) -> None:
        """Ensure store directory exists."""
        self.store_dir.mkdir(parents=True, exist_ok=True)

    def store(
        self,
        content: str,
        *,
        tool: str = "",
        preview_lines: int | None = None,
    ) -> StoredResult:
        """Store large output to disk.

        Args:
            content: The output content to store
            tool: Name of the tool that generated this output
            preview_lines: Number of preview lines (default from config)

        Returns:
            StoredResult with handle and summary
        """
        # Probabilistic cleanup: run every N store calls instead of every call
        self._store_count += 1
        if self._store_count >= self._CLEANUP_INTERVAL:
            self._store_count = 0
            self.cleanup()

        # Generate unique handle
        handle = uuid.uuid4().hex[:12]

        # Split into lines
        lines = content.splitlines()
        total_lines = len(lines)
        size_bytes = len(content.encode("utf-8"))

        # Write content file
        content_path = self.store_dir / f"result-{handle}.txt"
        content_path.write_text(content, encoding="utf-8")

        # Create and write meta file
        meta = ResultMeta(
            handle=handle,
            total_lines=total_lines,
            size_bytes=size_bytes,
            created_at=datetime.now(UTC).isoformat(),
            tool=tool,
        )
        meta_path = self.store_dir / f"result-{handle}.meta.json"
        meta_path.write_text(json.dumps(meta.to_dict(), indent=2), encoding="utf-8")

        # Generate summary
        summary = self._generate_summary(lines, tool)

        # Get preview lines from config if not specified
        if preview_lines is None:
            config = get_config()
            preview_lines = config.output.preview_lines

        preview = lines[:preview_lines]

        return StoredResult(
            handle=handle,
            total_lines=total_lines,
            size_bytes=size_bytes,
            summary=summary,
            preview=preview,
            query=f"ot.result(handle='{handle}', offset=1, limit=50)",
        )

    def query(
        self,
        handle: str,
        *,
        offset: int = 1,
        limit: int = 100,
        search: str = "",
        fuzzy: bool = False,
    ) -> QueryResult:
        """Query stored result with pagination and optional filtering.

        Args:
            handle: The result handle from store()
            offset: Starting line number (1-indexed, matching Claude's Read tool)
            limit: Maximum lines to return
            search: Regex pattern to filter lines (optional)
            fuzzy: Use fuzzy matching instead of regex (optional)

        Returns:
            QueryResult with matching lines

        Raises:
            ValueError: If handle not found or expired
        """
        # Normalize offset (0 treated as 1)
        if offset < 1:
            offset = 1

        # Find and load meta file
        meta = self._load_meta(handle)
        if meta is None:
            raise ValueError(f"Result not found: {handle}")

        # Check TTL
        if self._is_expired(meta):
            # Clean up expired file
            self._delete_result(handle)
            raise ValueError(f"Result expired: {handle}")

        # Load content
        content_path = self.store_dir / f"result-{handle}.txt"
        if not content_path.exists():
            raise ValueError(f"Result file missing: {handle}")

        content = content_path.read_text(encoding="utf-8")
        lines = content.splitlines()

        # Apply search filter if provided
        if search:
            if fuzzy:
                lines = self._fuzzy_filter(lines, search)
            else:
                try:
                    pattern = re.compile(search, re.IGNORECASE)
                    lines = [line for line in lines if pattern.search(line)]
                except re.error as e:
                    raise ValueError(f"Invalid search pattern: {e}") from e

        total_lines = len(lines)

        # Apply offset/limit (1-indexed)
        start_idx = offset - 1
        end_idx = start_idx + limit
        result_lines = lines[start_idx:end_idx]

        return QueryResult(
            lines=result_lines,
            total_lines=total_lines,
            returned=len(result_lines),
            offset=offset,
            has_more=end_idx < total_lines,
        )

    def cleanup(self) -> int:
        """Remove expired result files.

        Returns:
            Number of files cleaned up
        """
        # Cache config outside loop to avoid repeated lookups
        config = get_config()
        ttl = config.output.result_ttl

        cleaned = 0
        for meta_path in self.store_dir.glob("result-*.meta.json"):
            try:
                meta_data = json.loads(meta_path.read_text(encoding="utf-8"))
                meta = ResultMeta.from_dict(meta_data)

                if self._is_expired(meta, ttl=ttl):
                    self._delete_result(meta.handle)
                    cleaned += 1
            except (json.JSONDecodeError, KeyError, OSError):
                # Invalid meta file - try to clean up
                handle = meta_path.stem.replace("result-", "").replace(".meta", "")
                content_path = self.store_dir / f"result-{handle}.txt"
                if content_path.exists():
                    content_path.unlink()
                meta_path.unlink()
                cleaned += 1

        return cleaned

    def _generate_summary(self, lines: list[str], tool: str) -> str:
        """Generate human-readable summary of stored content."""
        total = len(lines)

        if tool:
            return f"{total} lines from {tool}"

        return f"{total} lines stored"

    def _load_meta(self, handle: str) -> ResultMeta | None:
        """Load metadata for a result handle."""
        meta_path = self.store_dir / f"result-{handle}.meta.json"
        if not meta_path.exists():
            return None

        try:
            data = json.loads(meta_path.read_text(encoding="utf-8"))
            return ResultMeta.from_dict(data)
        except (json.JSONDecodeError, KeyError):
            return None

    def _is_expired(self, meta: ResultMeta, *, ttl: int | None = None) -> bool:
        """Check if a result has exceeded TTL.

        Args:
            meta: Result metadata.
            ttl: TTL in seconds, or None to read from config.
        """
        if ttl is None:
            config = get_config()
            ttl = config.output.result_ttl

        if ttl <= 0:
            return False  # No expiry

        created = datetime.fromisoformat(meta.created_at)
        age = datetime.now(UTC) - created

        return age.total_seconds() > ttl

    def _delete_result(self, handle: str) -> None:
        """Delete result files for a handle."""
        content_path = self.store_dir / f"result-{handle}.txt"
        meta_path = self.store_dir / f"result-{handle}.meta.json"

        if content_path.exists():
            content_path.unlink()
        if meta_path.exists():
            meta_path.unlink()

    def _fuzzy_filter(self, lines: list[str], query: str) -> list[str]:
        """Filter lines using fuzzy matching, sorted by match score."""
        scored = []
        query_lower = query.lower()

        # Pre-compute lowered lines to avoid .lower() in hot loop
        lines_lower = [line.lower() for line in lines]

        for line, line_lower in zip(lines, lines_lower, strict=True):
            # Use SequenceMatcher for fuzzy matching
            ratio = difflib.SequenceMatcher(None, query_lower, line_lower).ratio()
            if ratio > 0.3:  # Threshold for fuzzy match
                scored.append((ratio, line))

        # Sort by score descending
        scored.sort(key=lambda x: x[0], reverse=True)

        return [line for _, line in scored]


def _get_default_store_dir() -> Path:
    """Get default store directory from config."""
    config = get_config()
    return config.get_result_store_path()


# Global singleton instance
_store: ResultStore | None = None


def get_result_store() -> ResultStore:
    """Get or create the global result store instance."""
    global _store
    if _store is None:
        _store = ResultStore()
    return _store
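To illustrate the store/query round trip, here is a minimal usage sketch. It is not part of the package: the driver script, the generated output, and the tool label are hypothetical, and it assumes OneTool's configuration is already loaded, since ResultStore reads store_dir, preview_lines, and result_ttl through get_config().

# Hypothetical usage sketch for ot/executor/result_store.py; assumes OneTool config is loaded.
from ot.executor.result_store import get_result_store

store = get_result_store()

# Store an oversized tool output; the caller gets a handle, summary, and preview.
big_output = "\n".join(f"line {i}" for i in range(5000))
stored = store.store(big_output, tool="ripgrep.search")  # tool label is illustrative
print(stored.summary)  # e.g. "5000 lines from ripgrep.search"
print(stored.query)    # suggested follow-up call for retrieving more lines

# Page through the stored result, optionally filtering lines with a regex.
page = store.query(stored.handle, offset=1, limit=50, search=r"line 42\d")
print(page.returned, page.has_more)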