onetool-mcp 1.0.0b1 (onetool_mcp-1.0.0b1-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bench/__init__.py +5 -0
- bench/cli.py +69 -0
- bench/harness/__init__.py +66 -0
- bench/harness/client.py +692 -0
- bench/harness/config.py +397 -0
- bench/harness/csv_writer.py +109 -0
- bench/harness/evaluate.py +512 -0
- bench/harness/metrics.py +283 -0
- bench/harness/runner.py +899 -0
- bench/py.typed +0 -0
- bench/reporter.py +629 -0
- bench/run.py +487 -0
- bench/secrets.py +101 -0
- bench/utils.py +16 -0
- onetool/__init__.py +4 -0
- onetool/cli.py +391 -0
- onetool/py.typed +0 -0
- onetool_mcp-1.0.0b1.dist-info/METADATA +163 -0
- onetool_mcp-1.0.0b1.dist-info/RECORD +132 -0
- onetool_mcp-1.0.0b1.dist-info/WHEEL +4 -0
- onetool_mcp-1.0.0b1.dist-info/entry_points.txt +3 -0
- onetool_mcp-1.0.0b1.dist-info/licenses/LICENSE.txt +687 -0
- onetool_mcp-1.0.0b1.dist-info/licenses/NOTICE.txt +64 -0
- ot/__init__.py +37 -0
- ot/__main__.py +6 -0
- ot/_cli.py +107 -0
- ot/_tui.py +53 -0
- ot/config/__init__.py +46 -0
- ot/config/defaults/bench.yaml +4 -0
- ot/config/defaults/diagram-templates/api-flow.mmd +33 -0
- ot/config/defaults/diagram-templates/c4-context.puml +30 -0
- ot/config/defaults/diagram-templates/class-diagram.mmd +87 -0
- ot/config/defaults/diagram-templates/feature-mindmap.mmd +70 -0
- ot/config/defaults/diagram-templates/microservices.d2 +81 -0
- ot/config/defaults/diagram-templates/project-gantt.mmd +37 -0
- ot/config/defaults/diagram-templates/state-machine.mmd +42 -0
- ot/config/defaults/onetool.yaml +25 -0
- ot/config/defaults/prompts.yaml +97 -0
- ot/config/defaults/servers.yaml +7 -0
- ot/config/defaults/snippets.yaml +4 -0
- ot/config/defaults/tool_templates/__init__.py +7 -0
- ot/config/defaults/tool_templates/extension.py +52 -0
- ot/config/defaults/tool_templates/isolated.py +61 -0
- ot/config/dynamic.py +121 -0
- ot/config/global_templates/__init__.py +2 -0
- ot/config/global_templates/bench-secrets-template.yaml +6 -0
- ot/config/global_templates/bench.yaml +9 -0
- ot/config/global_templates/onetool.yaml +27 -0
- ot/config/global_templates/secrets-template.yaml +44 -0
- ot/config/global_templates/servers.yaml +18 -0
- ot/config/global_templates/snippets.yaml +235 -0
- ot/config/loader.py +1087 -0
- ot/config/mcp.py +145 -0
- ot/config/secrets.py +190 -0
- ot/config/tool_config.py +125 -0
- ot/decorators.py +116 -0
- ot/executor/__init__.py +35 -0
- ot/executor/base.py +16 -0
- ot/executor/fence_processor.py +83 -0
- ot/executor/linter.py +142 -0
- ot/executor/pack_proxy.py +260 -0
- ot/executor/param_resolver.py +140 -0
- ot/executor/pep723.py +288 -0
- ot/executor/result_store.py +369 -0
- ot/executor/runner.py +496 -0
- ot/executor/simple.py +163 -0
- ot/executor/tool_loader.py +396 -0
- ot/executor/validator.py +398 -0
- ot/executor/worker_pool.py +388 -0
- ot/executor/worker_proxy.py +189 -0
- ot/http_client.py +145 -0
- ot/logging/__init__.py +37 -0
- ot/logging/config.py +315 -0
- ot/logging/entry.py +213 -0
- ot/logging/format.py +188 -0
- ot/logging/span.py +349 -0
- ot/meta.py +1555 -0
- ot/paths.py +453 -0
- ot/prompts.py +218 -0
- ot/proxy/__init__.py +21 -0
- ot/proxy/manager.py +396 -0
- ot/py.typed +0 -0
- ot/registry/__init__.py +189 -0
- ot/registry/models.py +57 -0
- ot/registry/parser.py +269 -0
- ot/registry/registry.py +413 -0
- ot/server.py +315 -0
- ot/shortcuts/__init__.py +15 -0
- ot/shortcuts/aliases.py +87 -0
- ot/shortcuts/snippets.py +258 -0
- ot/stats/__init__.py +35 -0
- ot/stats/html.py +250 -0
- ot/stats/jsonl_writer.py +283 -0
- ot/stats/reader.py +354 -0
- ot/stats/timing.py +57 -0
- ot/support.py +63 -0
- ot/tools.py +114 -0
- ot/utils/__init__.py +81 -0
- ot/utils/batch.py +161 -0
- ot/utils/cache.py +120 -0
- ot/utils/deps.py +403 -0
- ot/utils/exceptions.py +23 -0
- ot/utils/factory.py +179 -0
- ot/utils/format.py +65 -0
- ot/utils/http.py +202 -0
- ot/utils/platform.py +45 -0
- ot/utils/sanitize.py +130 -0
- ot/utils/truncate.py +69 -0
- ot_tools/__init__.py +4 -0
- ot_tools/_convert/__init__.py +12 -0
- ot_tools/_convert/excel.py +279 -0
- ot_tools/_convert/pdf.py +254 -0
- ot_tools/_convert/powerpoint.py +268 -0
- ot_tools/_convert/utils.py +358 -0
- ot_tools/_convert/word.py +283 -0
- ot_tools/brave_search.py +604 -0
- ot_tools/code_search.py +736 -0
- ot_tools/context7.py +495 -0
- ot_tools/convert.py +614 -0
- ot_tools/db.py +415 -0
- ot_tools/diagram.py +1604 -0
- ot_tools/diagram.yaml +167 -0
- ot_tools/excel.py +1372 -0
- ot_tools/file.py +1348 -0
- ot_tools/firecrawl.py +732 -0
- ot_tools/grounding_search.py +646 -0
- ot_tools/package.py +604 -0
- ot_tools/py.typed +0 -0
- ot_tools/ripgrep.py +544 -0
- ot_tools/scaffold.py +471 -0
- ot_tools/transform.py +213 -0
- ot_tools/web_fetch.py +384 -0
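
As a quick sanity check of the listing above, the wheel's contents and its declared entry points can be inspected with the Python standard library alone. This is an illustrative sketch, not part of the package; the wheel filename is assumed from the dist-info prefix (onetool_mcp-1.0.0b1) shown in the listing.

import zipfile

# Filename assumed from the dist-info prefix above (onetool_mcp-1.0.0b1).
wheel = "onetool_mcp-1.0.0b1-py3-none-any.whl"
with zipfile.ZipFile(wheel) as zf:
    # Top-level entries should match the listing: bench, onetool, ot, ot_tools, dist-info.
    print(sorted({name.split("/")[0] for name in zf.namelist()}))
    # The 3-line entry_points.txt declares the console scripts installed by pip.
    print(zf.read("onetool_mcp-1.0.0b1.dist-info/entry_points.txt").decode())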
ot/meta.py
ADDED
@@ -0,0 +1,1555 @@
|
|
|
1
|
+
"""OneTool core introspection tools (ot pack).
|
|
2
|
+
|
|
3
|
+
Provides tool discovery and messaging under the unified `ot` pack.
|
|
4
|
+
These are core introspection functions, not external tools, so they
|
|
5
|
+
live in the core package rather than tools_dir.
|
|
6
|
+
|
|
7
|
+
Functions:
|
|
8
|
+
ot.tools() - List or get tools with full documentation
|
|
9
|
+
ot.packs() - List or get packs with instructions
|
|
10
|
+
ot.aliases() - List or get alias definitions
|
|
11
|
+
ot.snippets() - List or get snippet definitions
|
|
12
|
+
ot.config() - Show configuration summary
|
|
13
|
+
ot.health() - Check system health
|
|
14
|
+
ot.stats() - Get runtime statistics
|
|
15
|
+
ot.result() - Query stored large output results
|
|
16
|
+
ot.notify() - Publish message to topic
|
|
17
|
+
ot.reload() - Force configuration reload
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
from __future__ import annotations
|
|
21
|
+
|
|
22
|
+
import asyncio
|
|
23
|
+
import fnmatch
|
|
24
|
+
import inspect
|
|
25
|
+
import sys
|
|
26
|
+
import time as _time
|
|
27
|
+
from collections.abc import (
|
|
28
|
+
Callable as _Callable, # noqa: TC003 - used at runtime in timed()
|
|
29
|
+
)
|
|
30
|
+
from datetime import UTC, datetime
|
|
31
|
+
from pathlib import Path
|
|
32
|
+
from typing import Any, Literal
|
|
33
|
+
from typing import TypeVar as _TypeVar
|
|
34
|
+
|
|
35
|
+
import aiofiles
|
|
36
|
+
import yaml
|
|
37
|
+
|
|
38
|
+
from ot import __version__
|
|
39
|
+
from ot.config import get_config
|
|
40
|
+
from ot.logging import LogSpan
|
|
41
|
+
from ot.paths import get_global_dir, get_project_dir, resolve_cwd_path
|
|
42
|
+
from ot.proxy import get_proxy_manager
|
|
43
|
+
|
|
44
|
+
_T = _TypeVar("_T")
|
|
45
|
+
|
|
46
|
+
# Alias for cleaner logging calls in this module
|
|
47
|
+
log = LogSpan
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def resolve_ot_path(path: str) -> Path:
|
|
51
|
+
"""Resolve a path relative to the OT_DIR (.onetool/ directory).
|
|
52
|
+
|
|
53
|
+
Resolution priority:
|
|
54
|
+
1. If absolute or ~ path: use as-is
|
|
55
|
+
2. If project .onetool/ exists: resolve relative to it
|
|
56
|
+
3. Fall back to global ~/.onetool/
|
|
57
|
+
|
|
58
|
+
Args:
|
|
59
|
+
path: Path string (relative, absolute, or with ~)
|
|
60
|
+
|
|
61
|
+
Returns:
|
|
62
|
+
Resolved absolute Path
|
|
63
|
+
"""
|
|
64
|
+
p = Path(path).expanduser()
|
|
65
|
+
if p.is_absolute():
|
|
66
|
+
return p.resolve()
|
|
67
|
+
|
|
68
|
+
# Try project .onetool/ first
|
|
69
|
+
project_dir = get_project_dir()
|
|
70
|
+
if project_dir:
|
|
71
|
+
return (project_dir / p).resolve()
|
|
72
|
+
|
|
73
|
+
# Fall back to global
|
|
74
|
+
return (get_global_dir() / p).resolve()
|
|
75
|
+
|
|
76
|
+
# Info level type for discovery functions
|
|
77
|
+
InfoLevel = Literal["list", "min", "full"]
|
|
78
|
+
|
|
79
|
+
# Pack name for dot notation: ot.tools(), ot.packs(), etc.
|
|
80
|
+
PACK_NAME = "ot"
|
|
81
|
+
|
|
82
|
+
# Documentation URL mapping for packs with misaligned slugs
|
|
83
|
+
DOC_SLUGS: dict[str, str] = {
|
|
84
|
+
"brave": "brave-search",
|
|
85
|
+
"code": "code-search",
|
|
86
|
+
"db": "database",
|
|
87
|
+
"ground": "grounding-search",
|
|
88
|
+
"llm": "transform",
|
|
89
|
+
"web": "web-fetch",
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
DOC_BASE_URL = "https://onetool.beycom.online/reference/tools/"
|
|
93
|
+
|
|
94
|
+
__all__ = [
|
|
95
|
+
"PACK_NAME",
|
|
96
|
+
"aliases",
|
|
97
|
+
"config",
|
|
98
|
+
"get_ot_pack_functions",
|
|
99
|
+
"health",
|
|
100
|
+
"help",
|
|
101
|
+
"notify",
|
|
102
|
+
"packs",
|
|
103
|
+
"reload",
|
|
104
|
+
"result",
|
|
105
|
+
"snippets",
|
|
106
|
+
"stats",
|
|
107
|
+
"timed",
|
|
108
|
+
"tools",
|
|
109
|
+
"version",
|
|
110
|
+
]
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def version() -> str:
|
|
114
|
+
"""Return OneTool version string.
|
|
115
|
+
|
|
116
|
+
Returns:
|
|
117
|
+
Version string (e.g., "1.0.0")
|
|
118
|
+
|
|
119
|
+
Example:
|
|
120
|
+
ot.version()
|
|
121
|
+
"""
|
|
122
|
+
return __version__
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def timed(func: _Callable[..., _T], **kwargs: Any) -> dict[str, Any]:
|
|
126
|
+
"""Execute a function and return result with timing info.
|
|
127
|
+
|
|
128
|
+
Args:
|
|
129
|
+
func: The function to call (e.g., brave.search)
|
|
130
|
+
**kwargs: Keyword arguments to pass to the function
|
|
131
|
+
|
|
132
|
+
Returns:
|
|
133
|
+
Dict with 'ms' (elapsed milliseconds) and 'result' keys
|
|
134
|
+
|
|
135
|
+
Example:
|
|
136
|
+
ot.timed(brave.search, query="AI news")
|
|
137
|
+
# Returns: {"ms": 234, "result": {...}}
|
|
138
|
+
"""
|
|
139
|
+
start = _time.perf_counter()
|
|
140
|
+
result = func(**kwargs)
|
|
141
|
+
elapsed = _time.perf_counter() - start
|
|
142
|
+
|
|
143
|
+
return {
|
|
144
|
+
"ms": round(elapsed * 1000),
|
|
145
|
+
"result": result,
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def get_ot_pack_functions() -> dict[str, Any]:
|
|
150
|
+
"""Get all ot pack functions for registration.
|
|
151
|
+
|
|
152
|
+
Returns:
|
|
153
|
+
Dict mapping function names to callables
|
|
154
|
+
"""
|
|
155
|
+
return {
|
|
156
|
+
"tools": tools,
|
|
157
|
+
"packs": packs,
|
|
158
|
+
"aliases": aliases,
|
|
159
|
+
"snippets": snippets,
|
|
160
|
+
"config": config,
|
|
161
|
+
"health": health,
|
|
162
|
+
"help": help,
|
|
163
|
+
"result": result,
|
|
164
|
+
"stats": stats,
|
|
165
|
+
"notify": notify,
|
|
166
|
+
"reload": reload,
|
|
167
|
+
"timed": timed,
|
|
168
|
+
"version": version,
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
# ============================================================================
|
|
173
|
+
# Help Function Utilities
|
|
174
|
+
# ============================================================================
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def _get_doc_url(pack: str) -> str:
|
|
178
|
+
"""Get documentation URL for a pack.
|
|
179
|
+
|
|
180
|
+
Args:
|
|
181
|
+
pack: Pack name (e.g., "brave", "firecrawl")
|
|
182
|
+
|
|
183
|
+
Returns:
|
|
184
|
+
Documentation URL for the pack
|
|
185
|
+
"""
|
|
186
|
+
slug = DOC_SLUGS.get(pack, pack)
|
|
187
|
+
return f"{DOC_BASE_URL}{slug}/"
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _fuzzy_match(query: str, candidates: list[str], threshold: float = 0.6) -> list[str]:
|
|
191
|
+
"""Return candidates that fuzzy match query, sorted by score.
|
|
192
|
+
|
|
193
|
+
Args:
|
|
194
|
+
query: Search query string
|
|
195
|
+
candidates: List of candidate strings to match against
|
|
196
|
+
threshold: Minimum similarity ratio (0.0 to 1.0) for fuzzy matches
|
|
197
|
+
|
|
198
|
+
Returns:
|
|
199
|
+
List of matching candidates, sorted by match score (best first)
|
|
200
|
+
"""
|
|
201
|
+
from difflib import SequenceMatcher
|
|
202
|
+
|
|
203
|
+
query_lower = query.lower()
|
|
204
|
+
scored: list[tuple[str, float]] = []
|
|
205
|
+
|
|
206
|
+
for candidate in candidates:
|
|
207
|
+
candidate_lower = candidate.lower()
|
|
208
|
+
# Substring match gets high score
|
|
209
|
+
if query_lower in candidate_lower:
|
|
210
|
+
scored.append((candidate, 1.0))
|
|
211
|
+
else:
|
|
212
|
+
ratio = SequenceMatcher(None, query_lower, candidate_lower).ratio()
|
|
213
|
+
if ratio >= threshold:
|
|
214
|
+
scored.append((candidate, ratio))
|
|
215
|
+
|
|
216
|
+
return [c for c, _ in sorted(scored, key=lambda x: -x[1])]
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _format_general_help() -> str:
|
|
220
|
+
"""Format general help overview shown when no query is provided.
|
|
221
|
+
|
|
222
|
+
Returns:
|
|
223
|
+
Formatted help text with discovery commands, info levels, and examples
|
|
224
|
+
"""
|
|
225
|
+
return """# OneTool Help
|
|
226
|
+
|
|
227
|
+
## Discovery
|
|
228
|
+
ot.tools() - List all tools
|
|
229
|
+
ot.tools(pattern="web") - Filter by pattern
|
|
230
|
+
ot.packs() - List all packs
|
|
231
|
+
ot.snippets() - List all snippets
|
|
232
|
+
ot.aliases() - List all aliases
|
|
233
|
+
ot.help(query="..") - Search for help
|
|
234
|
+
|
|
235
|
+
## Info Levels
|
|
236
|
+
info="list" - Names only
|
|
237
|
+
info="min" - Name + description (default)
|
|
238
|
+
info="full" - Everything
|
|
239
|
+
|
|
240
|
+
## Quick Examples
|
|
241
|
+
brave.search(query="AI news")
|
|
242
|
+
web.fetch(url="https://...")
|
|
243
|
+
$b_q q=search terms
|
|
244
|
+
|
|
245
|
+
## Tips
|
|
246
|
+
- Use keyword args: func(arg=value)
|
|
247
|
+
- Batch when possible: func(items=[...])"""
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def _format_tool_help(tool_info: dict[str, Any], pack: str) -> str:
|
|
251
|
+
"""Format detailed help for a single tool.
|
|
252
|
+
|
|
253
|
+
Args:
|
|
254
|
+
tool_info: Tool info dict from _build_tool_info with info="full"
|
|
255
|
+
pack: Pack name for documentation URL
|
|
256
|
+
|
|
257
|
+
Returns:
|
|
258
|
+
Formatted tool help text
|
|
259
|
+
"""
|
|
260
|
+
lines = [f"# {tool_info['name']}", ""]
|
|
261
|
+
|
|
262
|
+
if tool_info.get("description"):
|
|
263
|
+
lines.append(tool_info["description"])
|
|
264
|
+
lines.append("")
|
|
265
|
+
|
|
266
|
+
if tool_info.get("signature"):
|
|
267
|
+
lines.append("## Signature")
|
|
268
|
+
lines.append(tool_info["signature"])
|
|
269
|
+
lines.append("")
|
|
270
|
+
|
|
271
|
+
if tool_info.get("args"):
|
|
272
|
+
lines.append("## Arguments")
|
|
273
|
+
for arg in tool_info["args"]:
|
|
274
|
+
lines.append(f"- {arg}")
|
|
275
|
+
lines.append("")
|
|
276
|
+
|
|
277
|
+
if tool_info.get("returns"):
|
|
278
|
+
lines.append("## Returns")
|
|
279
|
+
lines.append(tool_info["returns"])
|
|
280
|
+
lines.append("")
|
|
281
|
+
|
|
282
|
+
if tool_info.get("example"):
|
|
283
|
+
lines.append("## Example")
|
|
284
|
+
lines.append(tool_info["example"])
|
|
285
|
+
lines.append("")
|
|
286
|
+
|
|
287
|
+
lines.append("## Docs")
|
|
288
|
+
lines.append(_get_doc_url(pack))
|
|
289
|
+
|
|
290
|
+
return "\n".join(lines)
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
def _format_pack_help(pack_name: str, pack_info: str) -> str:
|
|
294
|
+
"""Format detailed help for a pack.
|
|
295
|
+
|
|
296
|
+
Args:
|
|
297
|
+
pack_name: Name of the pack
|
|
298
|
+
pack_info: Pack info string from packs(info="full")
|
|
299
|
+
|
|
300
|
+
Returns:
|
|
301
|
+
Formatted pack help text with doc URL appended
|
|
302
|
+
"""
|
|
303
|
+
lines = [pack_info, "", "## Docs", _get_doc_url(pack_name)]
|
|
304
|
+
return "\n".join(lines)
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
def _format_snippet_help(snippet_info: str) -> str:
|
|
308
|
+
"""Format detailed help for a snippet.
|
|
309
|
+
|
|
310
|
+
Args:
|
|
311
|
+
snippet_info: Snippet info string from snippets(info="full")
|
|
312
|
+
|
|
313
|
+
Returns:
|
|
314
|
+
Formatted snippet help text
|
|
315
|
+
"""
|
|
316
|
+
return f"# Snippet\n\n{snippet_info}"
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
def _format_alias_help(alias_name: str, target: str) -> str:
|
|
320
|
+
"""Format detailed help for an alias.
|
|
321
|
+
|
|
322
|
+
Args:
|
|
323
|
+
alias_name: Name of the alias
|
|
324
|
+
target: Target function the alias maps to
|
|
325
|
+
|
|
326
|
+
Returns:
|
|
327
|
+
Formatted alias help text
|
|
328
|
+
"""
|
|
329
|
+
lines = [
|
|
330
|
+
f"# Alias: {alias_name}",
|
|
331
|
+
"",
|
|
332
|
+
f"Maps to: `{target}`",
|
|
333
|
+
"",
|
|
334
|
+
"Use this alias as a shorthand for the target function.",
|
|
335
|
+
]
|
|
336
|
+
return "\n".join(lines)
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def _item_matches(item: dict[str, Any] | str, matched_names: list[str], key: str = "name") -> bool:
|
|
340
|
+
"""Check if item name is in matched_names list.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
item: Either a string name or dict with name key
|
|
344
|
+
matched_names: List of matched name strings
|
|
345
|
+
key: Dict key to extract name from (default: "name")
|
|
346
|
+
|
|
347
|
+
Returns:
|
|
348
|
+
True if item's name is in matched_names
|
|
349
|
+
"""
|
|
350
|
+
if isinstance(item, str):
|
|
351
|
+
# For "name: desc" or "name -> target" formats, extract the name part
|
|
352
|
+
if ": " in item:
|
|
353
|
+
name = item.split(":")[0]
|
|
354
|
+
elif " ->" in item:
|
|
355
|
+
name = item.split(" ->")[0]
|
|
356
|
+
else:
|
|
357
|
+
name = item
|
|
358
|
+
return name in matched_names
|
|
359
|
+
return item.get(key) in matched_names
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def _snippet_matches(item: dict[str, Any] | str, matched_names: list[str]) -> bool:
|
|
363
|
+
"""Check if snippet item matches any of the matched names.
|
|
364
|
+
|
|
365
|
+
Args:
|
|
366
|
+
item: Either a string or dict snippet item
|
|
367
|
+
matched_names: List of matched snippet name strings
|
|
368
|
+
|
|
369
|
+
Returns:
|
|
370
|
+
True if snippet matches
|
|
371
|
+
"""
|
|
372
|
+
if not matched_names:
|
|
373
|
+
return False
|
|
374
|
+
if isinstance(item, str):
|
|
375
|
+
name = item.split(":")[0]
|
|
376
|
+
if name in matched_names:
|
|
377
|
+
return True
|
|
378
|
+
return any(item == m or item.startswith(m + ":") for m in matched_names)
|
|
379
|
+
return item.get("name") in matched_names
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
def _format_search_results(
|
|
383
|
+
query: str,
|
|
384
|
+
tools_results: list[dict[str, Any] | str],
|
|
385
|
+
packs_results: list[dict[str, Any] | str],
|
|
386
|
+
snippets_results: list[dict[str, Any] | str],
|
|
387
|
+
aliases_results: list[dict[str, Any] | str],
|
|
388
|
+
info: InfoLevel,
|
|
389
|
+
) -> str:
|
|
390
|
+
"""Format search results grouped by type.
|
|
391
|
+
|
|
392
|
+
Args:
|
|
393
|
+
query: Original search query
|
|
394
|
+
tools_results: Matching tools
|
|
395
|
+
packs_results: Matching packs
|
|
396
|
+
snippets_results: Matching snippets
|
|
397
|
+
aliases_results: Matching aliases
|
|
398
|
+
info: Output verbosity level
|
|
399
|
+
|
|
400
|
+
Returns:
|
|
401
|
+
Formatted search results text
|
|
402
|
+
"""
|
|
403
|
+
lines = [f'# Search results for "{query}"', ""]
|
|
404
|
+
|
|
405
|
+
if tools_results:
|
|
406
|
+
lines.append("## Tools")
|
|
407
|
+
for tool in tools_results:
|
|
408
|
+
if isinstance(tool, str):
|
|
409
|
+
lines.append(f"- {tool}")
|
|
410
|
+
elif info == "min":
|
|
411
|
+
lines.append(f"- {tool['name']}: {tool.get('description', '')}")
|
|
412
|
+
else:
|
|
413
|
+
lines.append(f"- {tool['name']}")
|
|
414
|
+
lines.append("")
|
|
415
|
+
|
|
416
|
+
if packs_results:
|
|
417
|
+
lines.append("## Packs")
|
|
418
|
+
for pack in packs_results:
|
|
419
|
+
if isinstance(pack, str):
|
|
420
|
+
lines.append(f"- {pack}")
|
|
421
|
+
elif info == "min":
|
|
422
|
+
lines.append(f"- {pack['name']} ({pack.get('tool_count', 0)} tools)")
|
|
423
|
+
else:
|
|
424
|
+
lines.append(f"- {pack['name']}")
|
|
425
|
+
lines.append("")
|
|
426
|
+
|
|
427
|
+
if snippets_results:
|
|
428
|
+
lines.append("## Snippets")
|
|
429
|
+
for snippet in snippets_results:
|
|
430
|
+
if isinstance(snippet, str):
|
|
431
|
+
# For info="min", format is "name: description"
|
|
432
|
+
lines.append(f"- ${snippet.split(':')[0]}" if ":" not in snippet else f"- ${snippet}")
|
|
433
|
+
else:
|
|
434
|
+
lines.append(f"- ${snippet}")
|
|
435
|
+
lines.append("")
|
|
436
|
+
|
|
437
|
+
if aliases_results:
|
|
438
|
+
lines.append("## Aliases")
|
|
439
|
+
for alias in aliases_results:
|
|
440
|
+
if isinstance(alias, str):
|
|
441
|
+
lines.append(f"- {alias}")
|
|
442
|
+
else:
|
|
443
|
+
lines.append(f"- {alias['name']} -> {alias['target']}")
|
|
444
|
+
lines.append("")
|
|
445
|
+
|
|
446
|
+
if not any([tools_results, packs_results, snippets_results, aliases_results]):
|
|
447
|
+
lines.append("No matches found.")
|
|
448
|
+
lines.append("")
|
|
449
|
+
lines.append("Try browsing with:")
|
|
450
|
+
lines.append(" ot.tools() - List all tools")
|
|
451
|
+
lines.append(" ot.packs() - List all packs")
|
|
452
|
+
lines.append(" ot.snippets() - List all snippets")
|
|
453
|
+
lines.append(" ot.aliases() - List all aliases")
|
|
454
|
+
|
|
455
|
+
return "\n".join(lines).rstrip()
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
# ============================================================================
|
|
459
|
+
# Tool Discovery Functions
|
|
460
|
+
# ============================================================================
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
def _parse_docstring(doc: str | None) -> dict[str, Any]:
|
|
464
|
+
"""Parse docstring using docstring-parser library.
|
|
465
|
+
|
|
466
|
+
Args:
|
|
467
|
+
doc: Function docstring
|
|
468
|
+
|
|
469
|
+
Returns:
|
|
470
|
+
Dict with 'short', 'args', 'returns', and 'example' keys
|
|
471
|
+
"""
|
|
472
|
+
from docstring_parser import parse as parse_docstring
|
|
473
|
+
|
|
474
|
+
if not doc:
|
|
475
|
+
return {"short": "", "args": [], "returns": "", "example": ""}
|
|
476
|
+
|
|
477
|
+
parsed = parse_docstring(doc)
|
|
478
|
+
|
|
479
|
+
# Extract example from examples section
|
|
480
|
+
example = ""
|
|
481
|
+
if parsed.examples:
|
|
482
|
+
example = "\n".join(
|
|
483
|
+
ex.description or "" for ex in parsed.examples if ex.description
|
|
484
|
+
)
|
|
485
|
+
|
|
486
|
+
# Format args as "name: description" strings
|
|
487
|
+
args = [
|
|
488
|
+
f"{p.arg_name}: {p.description or '(no description)'}" for p in parsed.params
|
|
489
|
+
]
|
|
490
|
+
|
|
491
|
+
return {
|
|
492
|
+
"short": parsed.short_description or "",
|
|
493
|
+
"args": args,
|
|
494
|
+
"returns": parsed.returns.description if parsed.returns else "",
|
|
495
|
+
"example": example,
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
|
|
499
|
+
def _build_tool_info(
|
|
500
|
+
full_name: str, func: Any, source: str, info: InfoLevel
|
|
501
|
+
) -> dict[str, Any] | str:
|
|
502
|
+
"""Build tool info dict for a single tool.
|
|
503
|
+
|
|
504
|
+
Args:
|
|
505
|
+
full_name: Full tool name (e.g., "brave.search")
|
|
506
|
+
func: The function object
|
|
507
|
+
source: Source identifier (e.g., "local", "proxy:github")
|
|
508
|
+
info: Output verbosity level ("list", "min", "full")
|
|
509
|
+
|
|
510
|
+
Returns:
|
|
511
|
+
Tool name string if info="list", otherwise dict with tool info
|
|
512
|
+
"""
|
|
513
|
+
if info == "list":
|
|
514
|
+
return full_name
|
|
515
|
+
|
|
516
|
+
if func:
|
|
517
|
+
try:
|
|
518
|
+
sig = inspect.signature(func)
|
|
519
|
+
signature = f"{full_name}{sig}"
|
|
520
|
+
except (ValueError, TypeError):
|
|
521
|
+
signature = f"{full_name}(...)"
|
|
522
|
+
parsed = _parse_docstring(func.__doc__)
|
|
523
|
+
description = parsed["short"]
|
|
524
|
+
else:
|
|
525
|
+
signature = f"{full_name}(...)"
|
|
526
|
+
description = ""
|
|
527
|
+
parsed = _parse_docstring(None)
|
|
528
|
+
|
|
529
|
+
if info == "min":
|
|
530
|
+
return {"name": full_name, "description": description}
|
|
531
|
+
|
|
532
|
+
# info == "full"
|
|
533
|
+
tool_info: dict[str, Any] = {
|
|
534
|
+
"name": full_name,
|
|
535
|
+
"signature": signature,
|
|
536
|
+
"description": description,
|
|
537
|
+
}
|
|
538
|
+
# Include full documentation for LLM context
|
|
539
|
+
if parsed["args"]:
|
|
540
|
+
tool_info["args"] = parsed["args"]
|
|
541
|
+
if parsed["returns"]:
|
|
542
|
+
tool_info["returns"] = parsed["returns"]
|
|
543
|
+
if parsed["example"]:
|
|
544
|
+
tool_info["example"] = parsed["example"]
|
|
545
|
+
tool_info["source"] = source
|
|
546
|
+
return tool_info
|
|
547
|
+
|
|
548
|
+
|
|
549
|
+
def _schema_to_signature(full_name: str, schema: dict[str, Any]) -> str:
|
|
550
|
+
"""Convert JSON Schema to Python-like signature string.
|
|
551
|
+
|
|
552
|
+
Args:
|
|
553
|
+
full_name: Full tool name (e.g., "github.search")
|
|
554
|
+
schema: JSON Schema dict with 'properties' and 'required' keys
|
|
555
|
+
|
|
556
|
+
Returns:
|
|
557
|
+
Signature string like "github.search(query: str, repo: str = '...')"
|
|
558
|
+
"""
|
|
559
|
+
props = schema.get("properties", {})
|
|
560
|
+
required = set(schema.get("required", []))
|
|
561
|
+
|
|
562
|
+
if not props:
|
|
563
|
+
return f"{full_name}()"
|
|
564
|
+
|
|
565
|
+
params: list[str] = []
|
|
566
|
+
# Process required params first, then optional
|
|
567
|
+
for prop_name in sorted(props.keys(), key=lambda k: (k not in required, k)):
|
|
568
|
+
prop_def = props[prop_name]
|
|
569
|
+
prop_type = prop_def.get("type", "Any")
|
|
570
|
+
|
|
571
|
+
# Map JSON Schema types to Python-like types
|
|
572
|
+
type_map = {
|
|
573
|
+
"string": "str",
|
|
574
|
+
"integer": "int",
|
|
575
|
+
"number": "float",
|
|
576
|
+
"boolean": "bool",
|
|
577
|
+
"array": "list",
|
|
578
|
+
"object": "dict",
|
|
579
|
+
}
|
|
580
|
+
py_type = type_map.get(prop_type, prop_type)
|
|
581
|
+
|
|
582
|
+
if prop_name in required:
|
|
583
|
+
params.append(f"{prop_name}: {py_type}")
|
|
584
|
+
else:
|
|
585
|
+
default = prop_def.get("default")
|
|
586
|
+
if default is not None:
|
|
587
|
+
params.append(f"{prop_name}: {py_type} = {default!r}")
|
|
588
|
+
else:
|
|
589
|
+
params.append(f"{prop_name}: {py_type} = ...")
|
|
590
|
+
|
|
591
|
+
return f"{full_name}({', '.join(params)})"
|
|
592
|
+
|
|
593
|
+
|
|
594
|
+
def _parse_input_schema(schema: dict[str, Any]) -> list[str]:
|
|
595
|
+
"""Extract argument descriptions from JSON Schema properties.
|
|
596
|
+
|
|
597
|
+
Args:
|
|
598
|
+
schema: JSON Schema dict with 'properties' key
|
|
599
|
+
|
|
600
|
+
Returns:
|
|
601
|
+
List of "param_name: description" strings matching local tool format
|
|
602
|
+
"""
|
|
603
|
+
props = schema.get("properties", {})
|
|
604
|
+
required = set(schema.get("required", []))
|
|
605
|
+
|
|
606
|
+
args: list[str] = []
|
|
607
|
+
# Process required params first, then optional
|
|
608
|
+
for prop_name in sorted(props.keys(), key=lambda k: (k not in required, k)):
|
|
609
|
+
prop_def = props[prop_name]
|
|
610
|
+
description = prop_def.get("description", "(no description)")
|
|
611
|
+
args.append(f"{prop_name}: {description}")
|
|
612
|
+
|
|
613
|
+
return args
|
|
614
|
+
|
|
615
|
+
|
|
616
|
+
def _build_proxy_tool_info(
|
|
617
|
+
full_name: str,
|
|
618
|
+
description: str,
|
|
619
|
+
input_schema: dict[str, Any],
|
|
620
|
+
source: str,
|
|
621
|
+
info: InfoLevel,
|
|
622
|
+
) -> dict[str, Any] | str:
|
|
623
|
+
"""Build tool info dict for a proxy tool using its input schema.
|
|
624
|
+
|
|
625
|
+
Args:
|
|
626
|
+
full_name: Full tool name (e.g., "github.search")
|
|
627
|
+
description: Tool description from MCP server
|
|
628
|
+
input_schema: JSON Schema for tool input
|
|
629
|
+
source: Source identifier (e.g., "proxy:github")
|
|
630
|
+
info: Output verbosity level ("list", "min", "full")
|
|
631
|
+
|
|
632
|
+
Returns:
|
|
633
|
+
Tool name string if info="list", otherwise dict with tool info
|
|
634
|
+
"""
|
|
635
|
+
if info == "list":
|
|
636
|
+
return full_name
|
|
637
|
+
|
|
638
|
+
if info == "min":
|
|
639
|
+
return {"name": full_name, "description": description}
|
|
640
|
+
|
|
641
|
+
# info == "full"
|
|
642
|
+
tool_info: dict[str, Any] = {
|
|
643
|
+
"name": full_name,
|
|
644
|
+
"signature": _schema_to_signature(full_name, input_schema),
|
|
645
|
+
"description": description,
|
|
646
|
+
}
|
|
647
|
+
|
|
648
|
+
# Include args if schema has properties with descriptions
|
|
649
|
+
args = _parse_input_schema(input_schema)
|
|
650
|
+
if args:
|
|
651
|
+
tool_info["args"] = args
|
|
652
|
+
|
|
653
|
+
tool_info["source"] = source
|
|
654
|
+
return tool_info
|
|
655
|
+
|
|
656
|
+
|
|
657
|
+
def tools(
|
|
658
|
+
*,
|
|
659
|
+
pattern: str = "",
|
|
660
|
+
info: InfoLevel = "min",
|
|
661
|
+
) -> list[dict[str, Any] | str]:
|
|
662
|
+
"""List all available tools with optional filtering.
|
|
663
|
+
|
|
664
|
+
Lists registered local tools and proxied MCP server tools.
|
|
665
|
+
Use pattern for substring filtering.
|
|
666
|
+
|
|
667
|
+
Args:
|
|
668
|
+
pattern: Filter tools by name pattern (case-insensitive substring match)
|
|
669
|
+
info: Output verbosity level - "list" (names only), "min" (name + description),
|
|
670
|
+
or "full" (complete details including args, returns, example)
|
|
671
|
+
|
|
672
|
+
Returns:
|
|
673
|
+
List of tool names (info="list") or tool dicts (info="min"/"full")
|
|
674
|
+
|
|
675
|
+
Example:
|
|
676
|
+
ot.tools()
|
|
677
|
+
ot.tools(pattern="search")
|
|
678
|
+
ot.tools(pattern="brave.")
|
|
679
|
+
ot.tools(info="list")
|
|
680
|
+
ot.tools(pattern="brave.search", info="full")
|
|
681
|
+
"""
|
|
682
|
+
from ot.executor.tool_loader import load_tool_registry
|
|
683
|
+
|
|
684
|
+
with log(span="ot.tools", pattern=pattern or None, info=info) as s:
|
|
685
|
+
runner_registry = load_tool_registry()
|
|
686
|
+
proxy = get_proxy_manager()
|
|
687
|
+
|
|
688
|
+
tools_list: list[dict[str, Any] | str] = []
|
|
689
|
+
|
|
690
|
+
# Local tools from registry
|
|
691
|
+
from ot.executor.worker_proxy import WorkerPackProxy
|
|
692
|
+
|
|
693
|
+
for pack_name, pack_funcs in runner_registry.packs.items():
|
|
694
|
+
# Handle both dict and WorkerPackProxy
|
|
695
|
+
if isinstance(pack_funcs, WorkerPackProxy):
|
|
696
|
+
func_names = list(pack_funcs.functions)
|
|
697
|
+
func_items = [(n, getattr(pack_funcs, n)) for n in func_names]
|
|
698
|
+
else:
|
|
699
|
+
func_items = list(pack_funcs.items())
|
|
700
|
+
|
|
701
|
+
for func_name, func in func_items:
|
|
702
|
+
full_name = f"{pack_name}.{func_name}"
|
|
703
|
+
|
|
704
|
+
if pattern and pattern.lower() not in full_name.lower():
|
|
705
|
+
continue
|
|
706
|
+
|
|
707
|
+
tools_list.append(_build_tool_info(full_name, func, "local", info))
|
|
708
|
+
|
|
709
|
+
# Proxied tools
|
|
710
|
+
for proxy_tool in proxy.list_tools():
|
|
711
|
+
tool_name = f"{proxy_tool.server}.{proxy_tool.name}"
|
|
712
|
+
|
|
713
|
+
if pattern and pattern.lower() not in tool_name.lower():
|
|
714
|
+
continue
|
|
715
|
+
|
|
716
|
+
tools_list.append(
|
|
717
|
+
_build_proxy_tool_info(
|
|
718
|
+
tool_name,
|
|
719
|
+
proxy_tool.description or "",
|
|
720
|
+
proxy_tool.input_schema,
|
|
721
|
+
f"proxy:{proxy_tool.server}",
|
|
722
|
+
info,
|
|
723
|
+
)
|
|
724
|
+
)
|
|
725
|
+
|
|
726
|
+
# Sort by name (handle both str and dict)
|
|
727
|
+
tools_list.sort(key=lambda t: t if isinstance(t, str) else t["name"])
|
|
728
|
+
s.add("count", len(tools_list))
|
|
729
|
+
return tools_list
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
def packs(
|
|
733
|
+
*,
|
|
734
|
+
pattern: str = "",
|
|
735
|
+
info: InfoLevel = "min",
|
|
736
|
+
) -> list[dict[str, Any] | str]:
|
|
737
|
+
"""List all packs with optional filtering.
|
|
738
|
+
|
|
739
|
+
Lists all available packs (local and proxy).
|
|
740
|
+
Use pattern for substring filtering.
|
|
741
|
+
|
|
742
|
+
Args:
|
|
743
|
+
pattern: Filter packs by name pattern (case-insensitive substring)
|
|
744
|
+
info: Output verbosity level - "list" (names only), "min" (name + source + tool_count),
|
|
745
|
+
or "full" (detailed info with instructions and tool list)
|
|
746
|
+
|
|
747
|
+
Returns:
|
|
748
|
+
List of pack names (info="list") or pack dicts/strings (info="min"/"full")
|
|
749
|
+
|
|
750
|
+
Example:
|
|
751
|
+
ot.packs()
|
|
752
|
+
ot.packs(pattern="brav")
|
|
753
|
+
ot.packs(info="list")
|
|
754
|
+
ot.packs(pattern="brave", info="full")
|
|
755
|
+
"""
|
|
756
|
+
from ot.executor.tool_loader import load_tool_registry
|
|
757
|
+
from ot.prompts import PromptsError, get_pack_instructions, get_prompts
|
|
758
|
+
|
|
759
|
+
with log(span="ot.packs", pattern=pattern or None, info=info) as s:
|
|
760
|
+
runner_registry = load_tool_registry()
|
|
761
|
+
proxy = get_proxy_manager()
|
|
762
|
+
|
|
763
|
+
# Collect all packs
|
|
764
|
+
local_packs = set(runner_registry.packs.keys())
|
|
765
|
+
proxy_packs = set(proxy.servers)
|
|
766
|
+
all_pack_names = sorted(local_packs | proxy_packs)
|
|
767
|
+
|
|
768
|
+
# Filter by pattern
|
|
769
|
+
if pattern:
|
|
770
|
+
all_pack_names = [p for p in all_pack_names if pattern.lower() in p.lower()]
|
|
771
|
+
|
|
772
|
+
# info="list" - just names
|
|
773
|
+
if info == "list":
|
|
774
|
+
s.add("count", len(all_pack_names))
|
|
775
|
+
return all_pack_names # type: ignore[return-value]
|
|
776
|
+
|
|
777
|
+
# info="full" - detailed info for each matching pack
|
|
778
|
+
if info == "full":
|
|
779
|
+
results: list[dict[str, Any] | str] = []
|
|
780
|
+
for pack_name in all_pack_names:
|
|
781
|
+
is_local = pack_name in local_packs
|
|
782
|
+
|
|
783
|
+
# Build detailed pack info
|
|
784
|
+
lines = [f"# {pack_name} pack", ""]
|
|
785
|
+
|
|
786
|
+
# Get instructions
|
|
787
|
+
try:
|
|
788
|
+
prompts_config = get_prompts()
|
|
789
|
+
configured = get_pack_instructions(prompts_config, pack_name)
|
|
790
|
+
if configured:
|
|
791
|
+
lines.append(configured)
|
|
792
|
+
lines.append("")
|
|
793
|
+
except PromptsError:
|
|
794
|
+
pass
|
|
795
|
+
|
|
796
|
+
# List tools in this pack
|
|
797
|
+
lines.append("## Tools")
|
|
798
|
+
lines.append("")
|
|
799
|
+
|
|
800
|
+
if is_local:
|
|
801
|
+
from ot.executor.worker_proxy import WorkerPackProxy
|
|
802
|
+
|
|
803
|
+
pack_funcs = runner_registry.packs[pack_name]
|
|
804
|
+
if isinstance(pack_funcs, WorkerPackProxy):
|
|
805
|
+
func_items = [(n, getattr(pack_funcs, n)) for n in pack_funcs.functions]
|
|
806
|
+
else:
|
|
807
|
+
func_items = list(pack_funcs.items())
|
|
808
|
+
|
|
809
|
+
for func_name, func in sorted(func_items):
|
|
810
|
+
doc = func.__doc__ or "(no description)"
|
|
811
|
+
first_line = doc.split("\n")[0].strip()
|
|
812
|
+
lines.append(f"- **{pack_name}.{func_name}**: {first_line}")
|
|
813
|
+
else:
|
|
814
|
+
proxy_tools = proxy.list_tools(server=pack_name)
|
|
815
|
+
for tool in sorted(proxy_tools, key=lambda t: t.name):
|
|
816
|
+
desc = tool.description or "(no description)"
|
|
817
|
+
first_line = desc.split("\n")[0].strip()
|
|
818
|
+
lines.append(f"- **{pack_name}.{tool.name}**: {first_line}")
|
|
819
|
+
|
|
820
|
+
results.append("\n".join(lines))
|
|
821
|
+
|
|
822
|
+
s.add("count", len(results))
|
|
823
|
+
return results
|
|
824
|
+
|
|
825
|
+
# info="min" (default) - summary for each pack
|
|
826
|
+
packs_list: list[dict[str, Any] | str] = []
|
|
827
|
+
|
|
828
|
+
for pack_name in all_pack_names:
|
|
829
|
+
is_local = pack_name in local_packs
|
|
830
|
+
source = "local" if is_local else "proxy"
|
|
831
|
+
|
|
832
|
+
# Count tools in pack
|
|
833
|
+
if is_local:
|
|
834
|
+
from ot.executor.worker_proxy import WorkerPackProxy
|
|
835
|
+
|
|
836
|
+
pack_funcs = runner_registry.packs[pack_name]
|
|
837
|
+
if isinstance(pack_funcs, WorkerPackProxy):
|
|
838
|
+
tool_count = len(pack_funcs.functions)
|
|
839
|
+
else:
|
|
840
|
+
tool_count = len(pack_funcs)
|
|
841
|
+
else:
|
|
842
|
+
tool_count = len(proxy.list_tools(server=pack_name))
|
|
843
|
+
|
|
844
|
+
packs_list.append({
|
|
845
|
+
"name": pack_name,
|
|
846
|
+
"source": source,
|
|
847
|
+
"tool_count": tool_count,
|
|
848
|
+
})
|
|
849
|
+
|
|
850
|
+
s.add("count", len(packs_list))
|
|
851
|
+
return packs_list
|
|
852
|
+
|
|
853
|
+
|
|
854
|
+
# ============================================================================
|
|
855
|
+
# Messaging Functions
|
|
856
|
+
# ============================================================================
|
|
857
|
+
|
|
858
|
+
_background_tasks: set[asyncio.Task[None]] = set()
|
|
859
|
+
|
|
860
|
+
|
|
861
|
+
def _resolve_path(path: str) -> Path:
|
|
862
|
+
"""Resolve a topic file path relative to OT_DIR (.onetool/).
|
|
863
|
+
|
|
864
|
+
Uses SDK resolve_ot_path() for consistent path resolution.
|
|
865
|
+
|
|
866
|
+
Path resolution for topic files follows OT_DIR conventions:
|
|
867
|
+
- Relative paths: resolved relative to OT_DIR (.onetool/)
|
|
868
|
+
- Absolute paths: used as-is
|
|
869
|
+
- ~ paths: expanded to home directory
|
|
870
|
+
- Prefixed paths (CWD/, GLOBAL/, OT_DIR/): resolved to respective dirs
|
|
871
|
+
|
|
872
|
+
Note: ${VAR} patterns are NOT expanded here. Use ~/path instead of
|
|
873
|
+
${HOME}/path. Secrets (e.g., ${API_KEY}) are expanded during config
|
|
874
|
+
loading, not path resolution.
|
|
875
|
+
|
|
876
|
+
Args:
|
|
877
|
+
path: Path string from topic config.
|
|
878
|
+
|
|
879
|
+
Returns:
|
|
880
|
+
Resolved absolute Path.
|
|
881
|
+
"""
|
|
882
|
+
return resolve_ot_path(path)
|
|
883
|
+
|
|
884
|
+
|
|
885
|
+
def _match_topic_to_file(topic: str) -> Path | None:
|
|
886
|
+
"""Match topic to file path using first matching pattern.
|
|
887
|
+
|
|
888
|
+
Paths in topic config are resolved relative to OT_DIR (.onetool/).
|
|
889
|
+
See _resolve_path() for full path resolution behaviour.
|
|
890
|
+
|
|
891
|
+
Args:
|
|
892
|
+
topic: Topic string to match (e.g., "status:scan").
|
|
893
|
+
|
|
894
|
+
Returns:
|
|
895
|
+
Resolved file path for matching topic, or None if no match.
|
|
896
|
+
"""
|
|
897
|
+
cfg = get_config()
|
|
898
|
+
msg_config = cfg.tools.msg
|
|
899
|
+
|
|
900
|
+
for topic_config in msg_config.topics:
|
|
901
|
+
topic_pattern = topic_config.pattern
|
|
902
|
+
file_path = topic_config.file
|
|
903
|
+
|
|
904
|
+
if fnmatch.fnmatch(topic, topic_pattern):
|
|
905
|
+
return _resolve_path(file_path)
|
|
906
|
+
|
|
907
|
+
return None
|
|
908
|
+
|
|
909
|
+
|
|
910
|
+
async def _write_to_file(file_path: Path, doc: dict[str, Any]) -> None:
|
|
911
|
+
"""Write message document to file asynchronously."""
|
|
912
|
+
with log(span="ot.write", file=str(file_path), topic=doc.get("topic")) as s:
|
|
913
|
+
try:
|
|
914
|
+
file_path.parent.mkdir(parents=True, exist_ok=True)
|
|
915
|
+
async with aiofiles.open(file_path, "a") as f:
|
|
916
|
+
await f.write("---\n")
|
|
917
|
+
await f.write(
|
|
918
|
+
yaml.safe_dump(doc, default_flow_style=False, allow_unicode=True)
|
|
919
|
+
)
|
|
920
|
+
s.add("written", True)
|
|
921
|
+
except Exception as e:
|
|
922
|
+
s.add("error", str(e))
|
|
923
|
+
|
|
924
|
+
|
|
925
|
+
def notify(*, topic: str, message: str) -> str:
|
|
926
|
+
"""Publish a message to the matching topic file.
|
|
927
|
+
|
|
928
|
+
Routes the message to a YAML file based on topic pattern matching
|
|
929
|
+
configured in onetool.yaml. The write happens asynchronously.
|
|
930
|
+
|
|
931
|
+
Args:
|
|
932
|
+
topic: Topic string for routing (e.g., "status:scan", "notes")
|
|
933
|
+
message: Message content (text, can be multiline)
|
|
934
|
+
|
|
935
|
+
Returns:
|
|
936
|
+
"OK: <topic> -> <file>" if routed, "OK: no matching topic" if no match
|
|
937
|
+
|
|
938
|
+
Example:
|
|
939
|
+
ot.notify(topic="notes", message="Remember to review PR #123")
|
|
940
|
+
"""
|
|
941
|
+
with log(span="ot.notify", topic=topic) as s:
|
|
942
|
+
file_path = _match_topic_to_file(topic)
|
|
943
|
+
|
|
944
|
+
if file_path is None:
|
|
945
|
+
s.add("matched", False)
|
|
946
|
+
return "SKIP: no matching topic"
|
|
947
|
+
|
|
948
|
+
doc = {
|
|
949
|
+
"ts": datetime.now(UTC).isoformat().replace("+00:00", "Z"),
|
|
950
|
+
"topic": topic,
|
|
951
|
+
"message": message,
|
|
952
|
+
}
|
|
953
|
+
|
|
954
|
+
try:
|
|
955
|
+
loop = asyncio.get_running_loop()
|
|
956
|
+
task = loop.create_task(_write_to_file(file_path, doc))
|
|
957
|
+
_background_tasks.add(task)
|
|
958
|
+
task.add_done_callback(_background_tasks.discard)
|
|
959
|
+
except RuntimeError:
|
|
960
|
+
asyncio.run(_write_to_file(file_path, doc))
|
|
961
|
+
|
|
962
|
+
s.add("matched", True)
|
|
963
|
+
s.add("file", str(file_path))
|
|
964
|
+
return f"OK: {topic} -> {file_path}"
|
|
965
|
+
|
|
966
|
+
|
|
967
|
+
# ============================================================================
|
|
968
|
+
# Configuration & Health Functions
|
|
969
|
+
# ============================================================================
|
|
970
|
+
|
|
971
|
+
|
|
972
|
+
def config() -> dict[str, Any]:
|
|
973
|
+
"""Show key configuration values.
|
|
974
|
+
|
|
975
|
+
Returns tools_dir, include, aliases, snippets, and server names.
|
|
976
|
+
|
|
977
|
+
Returns:
|
|
978
|
+
Dict with configuration summary
|
|
979
|
+
|
|
980
|
+
Example:
|
|
981
|
+
ot.config()
|
|
982
|
+
"""
|
|
983
|
+
with log(span="ot.config") as s:
|
|
984
|
+
cfg = get_config()
|
|
985
|
+
|
|
986
|
+
result: dict[str, Any] = {
|
|
987
|
+
"tools_dir": cfg.tools_dir,
|
|
988
|
+
"include": cfg.include,
|
|
989
|
+
"aliases": dict(cfg.alias) if cfg.alias else {},
|
|
990
|
+
"snippets": {
|
|
991
|
+
name: {"description": snippet.description}
|
|
992
|
+
for name, snippet in cfg.snippets.items()
|
|
993
|
+
}
|
|
994
|
+
if cfg.snippets
|
|
995
|
+
else {},
|
|
996
|
+
"servers": list(cfg.servers.keys()) if cfg.servers else [],
|
|
997
|
+
}
|
|
998
|
+
|
|
999
|
+
s.add("toolsDirCount", len(result["tools_dir"]))
|
|
1000
|
+
s.add("includeCount", len(result["include"]))
|
|
1001
|
+
s.add("aliasCount", len(result["aliases"]))
|
|
1002
|
+
s.add("snippetCount", len(result["snippets"]))
|
|
1003
|
+
s.add("serverCount", len(result["servers"]))
|
|
1004
|
+
|
|
1005
|
+
return result
|
|
1006
|
+
|
|
1007
|
+
|
|
1008
|
+
def health() -> dict[str, Any]:
|
|
1009
|
+
"""Check health of OneTool components.
|
|
1010
|
+
|
|
1011
|
+
Returns:
|
|
1012
|
+
Dict with component status for registry and proxy
|
|
1013
|
+
|
|
1014
|
+
Example:
|
|
1015
|
+
ot.health()
|
|
1016
|
+
"""
|
|
1017
|
+
from ot.executor.tool_loader import load_tool_registry
|
|
1018
|
+
|
|
1019
|
+
with log(span="ot.health") as s:
|
|
1020
|
+
from ot.executor.worker_proxy import WorkerPackProxy
|
|
1021
|
+
|
|
1022
|
+
runner_registry = load_tool_registry()
|
|
1023
|
+
proxy = get_proxy_manager()
|
|
1024
|
+
cfg = get_config()
|
|
1025
|
+
|
|
1026
|
+
# Count functions, handling both dict and WorkerPackProxy
|
|
1027
|
+
tool_count = 0
|
|
1028
|
+
for funcs in runner_registry.packs.values():
|
|
1029
|
+
if isinstance(funcs, WorkerPackProxy):
|
|
1030
|
+
tool_count += len(funcs.functions)
|
|
1031
|
+
else:
|
|
1032
|
+
tool_count += len(funcs)
|
|
1033
|
+
registry_status = {
|
|
1034
|
+
"status": "ok",
|
|
1035
|
+
"tool_count": tool_count,
|
|
1036
|
+
}
|
|
1037
|
+
|
|
1038
|
+
server_statuses: dict[str, str] = {}
|
|
1039
|
+
for server_name in cfg.servers:
|
|
1040
|
+
conn = proxy.get_connection(server_name)
|
|
1041
|
+
server_statuses[server_name] = "connected" if conn else "disconnected"
|
|
1042
|
+
|
|
1043
|
+
proxy_status: dict[str, Any] = {
|
|
1044
|
+
"status": "ok"
|
|
1045
|
+
if all(status == "connected" for status in server_statuses.values())
|
|
1046
|
+
or not server_statuses
|
|
1047
|
+
else "degraded",
|
|
1048
|
+
"server_count": len(cfg.servers),
|
|
1049
|
+
}
|
|
1050
|
+
if server_statuses:
|
|
1051
|
+
proxy_status["servers"] = server_statuses
|
|
1052
|
+
|
|
1053
|
+
result = {
|
|
1054
|
+
"version": __version__,
|
|
1055
|
+
"python": f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
|
|
1056
|
+
"cwd": str(resolve_cwd_path(".")),
|
|
1057
|
+
"registry": registry_status,
|
|
1058
|
+
"proxy": proxy_status,
|
|
1059
|
+
}
|
|
1060
|
+
|
|
1061
|
+
s.add("registryOk", registry_status["status"] == "ok")
|
|
1062
|
+
s.add("proxyOk", proxy_status["status"] == "ok")
|
|
1063
|
+
|
|
1064
|
+
return result
|
|
1065
|
+
|
|
1066
|
+
|
|
1067
|
+
def reload() -> str:
|
|
1068
|
+
"""Force reload of all configuration.
|
|
1069
|
+
|
|
1070
|
+
Clears all cached state and reloads from disk:
|
|
1071
|
+
- Configuration (onetool.yaml and includes)
|
|
1072
|
+
- Secrets (secrets.yaml)
|
|
1073
|
+
- Tool registry (tool files from tools_dir)
|
|
1074
|
+
- Prompts
|
|
1075
|
+
- MCP proxy connections
|
|
1076
|
+
- Parameter resolution caches
|
|
1077
|
+
|
|
1078
|
+
Use after modifying config files, adding/removing tools, or
|
|
1079
|
+
changing secrets during a session.
|
|
1080
|
+
|
|
1081
|
+
Returns:
|
|
1082
|
+
Status message confirming reload
|
|
1083
|
+
|
|
1084
|
+
Example:
|
|
1085
|
+
ot.reload()
|
|
1086
|
+
"""
|
|
1087
|
+
import sys
|
|
1088
|
+
|
|
1089
|
+
with log(span="ot.reload") as s:
|
|
1090
|
+
import ot.config.loader
|
|
1091
|
+
import ot.config.secrets
|
|
1092
|
+
import ot.executor.param_resolver
|
|
1093
|
+
import ot.executor.tool_loader
|
|
1094
|
+
import ot.prompts
|
|
1095
|
+
import ot.proxy
|
|
1096
|
+
import ot.registry
|
|
1097
|
+
|
|
1098
|
+
# Clear config cache (must be first - other caches depend on it)
|
|
1099
|
+
ot.config.loader._config = None
|
|
1100
|
+
|
|
1101
|
+
# Clear secrets cache
|
|
1102
|
+
ot.config.secrets._secrets = None
|
|
1103
|
+
|
|
1104
|
+
# Clear prompts cache
|
|
1105
|
+
ot.prompts._prompts = None
|
|
1106
|
+
|
|
1107
|
+
# Clear tool loader module cache
|
|
1108
|
+
ot.executor.tool_loader._module_cache.clear()
|
|
1109
|
+
|
|
1110
|
+
# Clean up dynamically loaded tool modules from sys.modules
|
|
1111
|
+
# Tool loader uses "tools.{stem}" naming pattern
|
|
1112
|
+
tool_modules = [name for name in sys.modules if name.startswith("tools.")]
|
|
1113
|
+
for mod_name in tool_modules:
|
|
1114
|
+
del sys.modules[mod_name]
|
|
1115
|
+
s.add("toolModulesCleared", len(tool_modules))
|
|
1116
|
+
|
|
1117
|
+
# Clear tool registry cache (will rescan on next access)
|
|
1118
|
+
ot.registry._registry = None
|
|
1119
|
+
|
|
1120
|
+
# Clear param resolver cache (depends on registry)
|
|
1121
|
+
ot.executor.param_resolver.get_tool_param_names.cache_clear()
|
|
1122
|
+
ot.executor.param_resolver._mcp_param_cache.clear()
|
|
1123
|
+
|
|
1124
|
+
# Reload config to validate and report stats
|
|
1125
|
+
cfg = get_config()
|
|
1126
|
+
|
|
1127
|
+
# Reconnect MCP proxy servers with fresh config
|
|
1128
|
+
ot.proxy.reconnect_proxy_manager()
|
|
1129
|
+
s.add("aliasCount", len(cfg.alias) if cfg.alias else 0)
|
|
1130
|
+
s.add("snippetCount", len(cfg.snippets) if cfg.snippets else 0)
|
|
1131
|
+
s.add("serverCount", len(cfg.servers) if cfg.servers else 0)
|
|
1132
|
+
|
|
1133
|
+
return "OK: Configuration reloaded"
|
|
1134
|
+
|
|
1135
|
+
|
|
1136
|
+
def stats(
|
|
1137
|
+
*,
|
|
1138
|
+
period: str = "all",
|
|
1139
|
+
tool: str = "",
|
|
1140
|
+
output: str = "",
|
|
1141
|
+
) -> dict[str, Any] | str:
|
|
1142
|
+
"""Get runtime statistics for OneTool usage.
|
|
1143
|
+
|
|
1144
|
+
Returns aggregated statistics including call counts, success rates,
|
|
1145
|
+
durations, and estimated context/time savings from tool consolidation.
|
|
1146
|
+
|
|
1147
|
+
Args:
|
|
1148
|
+
period: Time period to filter - "day", "week", "month", or "all" (default: "all")
|
|
1149
|
+
tool: Filter by tool name (e.g., "brave.search"). Empty for all tools.
|
|
1150
|
+
output: Path to write HTML report. Empty for JSON output only.
|
|
1151
|
+
|
|
1152
|
+
Returns:
|
|
1153
|
+
Dict with aggregated statistics including:
|
|
1154
|
+
- total_calls: Total number of tool calls
|
|
1155
|
+
- success_rate: Percentage of successful calls
|
|
1156
|
+
- context_saved: Estimated context tokens saved
|
|
1157
|
+
- time_saved_ms: Estimated time saved in milliseconds
|
|
1158
|
+
- tools: Per-tool breakdown
|
|
1159
|
+
|
|
1160
|
+
Example:
|
|
1161
|
+
ot.stats()
|
|
1162
|
+
ot.stats(period="day")
|
|
1163
|
+
ot.stats(period="week", tool="brave.search")
|
|
1164
|
+
ot.stats(output="stats_report.html")
|
|
1165
|
+
"""
|
|
1166
|
+
from ot.stats import Period, StatsReader
|
|
1167
|
+
from ot.support import get_support_dict
|
|
1168
|
+
|
|
1169
|
+
with log(span="ot.stats", period=period, tool=tool or None) as s:
|
|
1170
|
+
cfg = get_config()
|
|
1171
|
+
|
|
1172
|
+
# Validate period
|
|
1173
|
+
valid_periods: list[Period] = ["day", "week", "month", "all"]
|
|
1174
|
+
if period not in valid_periods:
|
|
1175
|
+
s.add("error", "invalid_period")
|
|
1176
|
+
return f"Error: Invalid period '{period}'. Valid: day, week, month, all. Example: ot.stats(period='day')"
|
|
1177
|
+
|
|
1178
|
+
# Check if stats are enabled
|
|
1179
|
+
if not cfg.stats.enabled:
|
|
1180
|
+
s.add("error", "stats_disabled")
|
|
1181
|
+
return "Error: Statistics collection is disabled in configuration"
|
|
1182
|
+
|
|
1183
|
+
# Read stats
|
|
1184
|
+
stats_path = cfg.get_stats_file_path()
|
|
1185
|
+
reader = StatsReader(
|
|
1186
|
+
path=stats_path,
|
|
1187
|
+
context_per_call=cfg.stats.context_per_call,
|
|
1188
|
+
time_overhead_per_call_ms=cfg.stats.time_overhead_per_call_ms,
|
|
1189
|
+
model=cfg.stats.model,
|
|
1190
|
+
cost_per_million_input_tokens=cfg.stats.cost_per_million_input_tokens,
|
|
1191
|
+
cost_per_million_output_tokens=cfg.stats.cost_per_million_output_tokens,
|
|
1192
|
+
chars_per_token=cfg.stats.chars_per_token,
|
|
1193
|
+
)
|
|
1194
|
+
|
|
1195
|
+
aggregated = reader.read(
|
|
1196
|
+
period=period, # type: ignore[arg-type]
|
|
1197
|
+
tool=tool if tool else None,
|
|
1198
|
+
)
|
|
1199
|
+
|
|
1200
|
+
result = aggregated.to_dict()
|
|
1201
|
+
result["support"] = get_support_dict()
|
|
1202
|
+
s.add("totalCalls", result["total_calls"])
|
|
1203
|
+
s.add("toolCount", len(result["tools"]))
|
|
1204
|
+
|
|
1205
|
+
# Generate HTML report if output path specified
|
|
1206
|
+
if output:
|
|
1207
|
+
from ot.stats.html import generate_html_report
|
|
1208
|
+
|
|
1209
|
+
# Resolve output path relative to tmp directory
|
|
1210
|
+
output_path = cfg.get_result_store_path() / output
|
|
1211
|
+
html_content = generate_html_report(aggregated)
|
|
1212
|
+
try:
|
|
1213
|
+
output_path.parent.mkdir(parents=True, exist_ok=True)
|
|
1214
|
+
output_path.write_text(html_content)
|
|
1215
|
+
result["html_report"] = str(output_path)
|
|
1216
|
+
s.add("htmlReport", str(output_path))
|
|
1217
|
+
except OSError as e:
|
|
1218
|
+
s.add("error", "write_failed")
|
|
1219
|
+
return f"Error: Cannot write to '{output}': {e.strerror}"
|
|
1220
|
+
|
|
1221
|
+
return result
|
|
1222
|
+
|
|
1223
|
+
|
|
1224
|
+
# ============================================================================
|
|
1225
|
+
# Result Query Function
|
|
1226
|
+
# ============================================================================
|
|
1227
|
+
|
|
1228
|
+
|
|
1229
|
+
def result(
|
|
1230
|
+
*,
|
|
1231
|
+
handle: str,
|
|
1232
|
+
offset: int = 1,
|
|
1233
|
+
limit: int = 100,
|
|
1234
|
+
search: str = "",
|
|
1235
|
+
fuzzy: bool = False,
|
|
1236
|
+
) -> dict[str, Any]:
|
|
1237
|
+
"""Query stored large output results with pagination.
|
|
1238
|
+
|
|
1239
|
+
When tool outputs exceed max_inline_size, they are stored to disk
|
|
1240
|
+
and a handle is returned. Use this function to retrieve the content
|
|
1241
|
+
with offset/limit semantics matching Claude's Read tool.
|
|
1242
|
+
|
|
1243
|
+
Args:
|
|
1244
|
+
handle: The result handle from a stored output
|
|
1245
|
+
offset: Starting line number (1-indexed, like Claude's Read tool)
|
|
1246
|
+
limit: Maximum lines to return (default 100)
|
|
1247
|
+
search: Regex pattern to filter lines (optional)
|
|
1248
|
+
fuzzy: Use fuzzy matching instead of regex (optional)
|
|
1249
|
+
|
|
1250
|
+
Returns:
|
|
1251
|
+
Dict with:
|
|
1252
|
+
- lines: List of matching lines
|
|
1253
|
+
- total_lines: Total lines in stored result
|
|
1254
|
+
- returned: Number of lines returned
|
|
1255
|
+
- offset: Starting offset used
|
|
1256
|
+
- has_more: Boolean indicating if more lines exist
|
|
1257
|
+
|
|
1258
|
+
Raises:
|
|
1259
|
+
ValueError: If handle not found or expired
|
|
1260
|
+
|
|
1261
|
+
Example:
|
|
1262
|
+
ot.result(handle="abc123")
|
|
1263
|
+
ot.result(handle="abc123", offset=101, limit=50)
|
|
1264
|
+
ot.result(handle="abc123", search="error")
|
|
1265
|
+
ot.result(handle="abc123", search="config", fuzzy=True)
|
|
1266
|
+
"""
|
|
1267
|
+
from ot.executor.result_store import get_result_store
|
|
1268
|
+
|
|
1269
|
+
# Validate offset and limit (1-indexed)
|
|
1270
|
+
if offset < 1:
|
|
1271
|
+
raise ValueError(f"offset must be >= 1 (1-indexed), got {offset}")
|
|
1272
|
+
if limit < 1:
|
|
1273
|
+
raise ValueError(f"limit must be >= 1, got {limit}")
|
|
1274
|
+
|
|
1275
|
+
with log(
|
|
1276
|
+
span="ot.result",
|
|
1277
|
+
handle=handle,
|
|
1278
|
+
offset=offset,
|
|
1279
|
+
limit=limit,
|
|
1280
|
+
search=search if search else None,
|
|
1281
|
+
) as s:
|
|
1282
|
+
store = get_result_store()
|
|
1283
|
+
|
|
1284
|
+
try:
|
|
1285
|
+
query_result = store.query(
|
|
1286
|
+
handle=handle,
|
|
1287
|
+
offset=offset,
|
|
1288
|
+
limit=limit,
|
|
1289
|
+
search=search,
|
|
1290
|
+
fuzzy=fuzzy,
|
|
1291
|
+
)
|
|
1292
|
+
s.add("returned", query_result.returned)
|
|
1293
|
+
s.add("totalLines", query_result.total_lines)
|
|
1294
|
+
return query_result.to_dict()
|
|
1295
|
+
except ValueError as e:
|
|
1296
|
+
s.add("error", str(e))
|
|
1297
|
+
raise
|
|
1298
|
+
|
|
1299
|
+
|
|
1300
|
+
# ============================================================================
|
|
1301
|
+
# Introspection Functions
|
|
1302
|
+
# ============================================================================
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
def aliases(
|
|
1306
|
+
*,
|
|
1307
|
+
pattern: str = "",
|
|
1308
|
+
info: InfoLevel = "min",
|
|
1309
|
+
) -> list[dict[str, Any] | str]:
|
|
1310
|
+
"""List aliases with optional filtering.
|
|
1311
|
+
|
|
1312
|
+
Lists all configured aliases.
|
|
1313
|
+
Use pattern for substring filtering.
|
|
1314
|
+
|
|
1315
|
+
Args:
|
|
1316
|
+
pattern: Filter aliases by name or target pattern (case-insensitive substring)
|
|
1317
|
+
info: Output verbosity level - "list" (names only), "min" (name -> target),
|
|
1318
|
+
or "full" (structured dict with name and target)
|
|
1319
|
+
|
|
1320
|
+
Returns:
|
|
1321
|
+
List of alias names, strings, or dicts depending on info level
|
|
1322
|
+
|
|
1323
|
+
Example:
|
|
1324
|
+
ot.aliases()
|
|
1325
|
+
ot.aliases(pattern="search")
|
|
1326
|
+
ot.aliases(info="list")
|
|
1327
|
+
ot.aliases(pattern="ws", info="full")
|
|
1328
|
+
"""
|
|
1329
|
+
with log(span="ot.aliases", pattern=pattern or None, info=info) as s:
|
|
1330
|
+
cfg = get_config()
|
|
1331
|
+
|
|
1332
|
+
if not cfg.alias:
|
|
1333
|
+
s.add("count", 0)
|
|
1334
|
+
return []
|
|
1335
|
+
|
|
1336
|
+
# Filter by pattern or list all
|
|
1337
|
+
items = sorted(cfg.alias.items())
|
|
1338
|
+
if pattern:
|
|
1339
|
+
pattern_lower = pattern.lower()
|
|
1340
|
+
items = [(k, v) for k, v in items if pattern_lower in k.lower() or pattern_lower in v.lower()]
|
|
1341
|
+
|
|
1342
|
+
s.add("count", len(items))
|
|
1343
|
+
|
|
1344
|
+
# info="list" - just names
|
|
1345
|
+
if info == "list":
|
|
1346
|
+
return [k for k, v in items]
|
|
1347
|
+
|
|
1348
|
+
# info="full" - structured dicts
|
|
1349
|
+
if info == "full":
|
|
1350
|
+
return [{"name": k, "target": v} for k, v in items]
|
|
1351
|
+
|
|
1352
|
+
# info="min" (default) - "name -> target" strings
|
|
1353
|
+
return [f"{k} -> {v}" for k, v in items]
|
|
1354
|
+
|
|
1355
|
+
|
|
1356
|
+
def snippets(
    *,
    pattern: str = "",
    info: InfoLevel = "min",
) -> list[dict[str, Any] | str]:
    """List snippets with optional filtering.

    Lists all configured snippets.
    Use pattern for substring filtering.

    Args:
        pattern: Filter snippets by name/description pattern (case-insensitive substring)
        info: Output verbosity level - "list" (names only), "min" (name: description),
            or "full" (complete definition with params, body, example)

    Returns:
        List of snippet names, strings, or dicts depending on info level

    Example:
        ot.snippets()
        ot.snippets(pattern="pkg")
        ot.snippets(info="list")
        ot.snippets(pattern="brv_research", info="full")
    """
    with log(span="ot.snippets", pattern=pattern or None, info=info) as s:
        cfg = get_config()

        if not cfg.snippets:
            s.add("count", 0)
            return []

        # Filter by pattern or list all
        items = sorted(cfg.snippets.items())
        if pattern:
            pattern_lower = pattern.lower()
            items = [
                (k, v) for k, v in items
                if pattern_lower in k.lower() or pattern_lower in (v.description or "").lower()
            ]

        s.add("count", len(items))

        # info="list" - just names
        if info == "list":
            return [k for k, v in items]

        # info="full" - complete definition for each snippet
        if info == "full":
            results: list[dict[str, Any] | str] = []
            for snippet_name, snippet_def in items:
                # Format output as YAML-like
                lines = [f"name: {snippet_name}"]

                if snippet_def.description:
                    lines.append(f"description: {snippet_def.description}")

                if snippet_def.params:
                    lines.append("params:")
                    for param_name, param_def in snippet_def.params.items():
                        param_parts = []
                        if param_def.default is not None:
                            param_parts.append(f"default: {param_def.default}")
                        if param_def.description:
                            param_parts.append(f'description: "{param_def.description}"')
                        lines.append(f" {param_name}: {{{', '.join(param_parts)}}}")

                lines.append("body: |")
                for body_line in snippet_def.body.rstrip().split("\n"):
                    lines.append(f" {body_line}")

                # Add example invocation
                lines.append("")
                lines.append("# Example:")

                # Build example with defaults
                example_args = []
                for param_name, param_def in snippet_def.params.items():
                    if param_def.default is not None:
                        continue  # Skip params with defaults in example
                    example_args.append(f'{param_name}="..."')

                if example_args:
                    lines.append(f"# ${snippet_name} {' '.join(example_args)}")
                else:
                    lines.append(f"# ${snippet_name}")

                results.append("\n".join(lines))

            return results

        # info="min" (default) - "name: description" strings
        return [f"{k}: {v.description or '(no description)'}" for k, v in items]


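The info="full" branch above renders each snippet as a YAML-like text block and appends an auto-generated example line listing only the parameters that lack defaults. A standalone sketch of that rendering, using a hypothetical snippet definition (the names, Param class, and body string are invented stand-ins for the package's real snippet model):

# Illustrative sketch, not part of the packaged source.
from dataclasses import dataclass

@dataclass
class Param:
    default: str | None = None
    description: str = ""

name = "pkg_info"
params = {
    "package": Param(description="Package name"),         # required
    "index": Param(default="pypi", description="Index"),  # has a default
}
body = "print(lookup(package, index))"

lines = [f"name: {name}", "params:"]
for pname, pdef in params.items():
    parts = []
    if pdef.default is not None:
        parts.append(f"default: {pdef.default}")
    if pdef.description:
        parts.append(f'description: "{pdef.description}"')
    lines.append(f" {pname}: {{{', '.join(parts)}}}")
lines.append("body: |")
lines.extend(f" {b}" for b in body.split("\n"))
required = [p for p, d in params.items() if d.default is None]
lines += ["", "# Example:", f"# ${name} " + " ".join(f'{p}="..."' for p in required)]
print("\n".join(lines))  # prints the YAML-like block plus "# $pkg_info package=..."
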
# ============================================================================
# Help Function
# ============================================================================


def help(*, query: str = "", info: InfoLevel = "min") -> str:
    """Get help on OneTool commands, tools, packs, snippets, or aliases.

    Provides a unified entry point for discovering and getting help on
    any OneTool component. With no arguments, shows a general overview.
    With a query, searches across all types and returns detailed help.

    Args:
        query: Tool name, pack name, snippet, alias, or search term.
            Empty string shows general help overview.
        info: Detail level - "list" (names only), "min" (name + description),
            "full" (everything). Default is "min".

    Returns:
        Formatted help text

    Example:
        ot.help()
        ot.help(query="brave.search")
        ot.help(query="firecrawl")
        ot.help(query="$b_q")
        ot.help(query="web fetch", info="list")
    """
    with log(span="ot.help", query=query or None, info=info) as s:
        # No query - show general help
        if not query:
            s.add("type", "general")
            return _format_general_help()

        cfg = get_config()

        # Check for exact tool match (contains ".")
        if "." in query:
            tool_results = tools(pattern=query, info="full")
            # Look for exact match
            for tool in tool_results:
                if isinstance(tool, dict) and tool.get("name") == query:
                    pack = query.split(".")[0]
                    s.add("type", "tool")
                    s.add("match", query)
                    return _format_tool_help(tool, pack)

        # Check for exact pack match
        pack_names = packs(info="list")
        if query in pack_names:
            pack_results = packs(pattern=query, info="full")
            if pack_results:
                s.add("type", "pack")
                s.add("match", query)
                return _format_pack_help(query, str(pack_results[0]))

        # Check for snippet match (starts with "$")
        if query.startswith("$"):
            snippet_name = query[1:]  # Remove "$"
            snippet_results = snippets(pattern=snippet_name, info="full")
            # Look for exact match
            for snippet in snippet_results:
                if isinstance(snippet, str) and snippet.startswith(f"name: {snippet_name}"):
                    s.add("type", "snippet")
                    s.add("match", query)
                    return _format_snippet_help(snippet)

        # Check for exact alias match
        if cfg.alias and query in cfg.alias:
            target = cfg.alias[query]
            s.add("type", "alias")
            s.add("match", query)
            return _format_alias_help(query, target)

        # Fuzzy search across all types
        s.add("type", "search")

        # Get all candidates for fuzzy matching
        all_tool_names = tools(info="list")
        all_pack_names = pack_names  # Already have this
        all_snippet_names = snippets(info="list")
        all_alias_names = aliases(info="list")

        # Fuzzy match across types
        matched_tools = _fuzzy_match(query, [str(t) for t in all_tool_names])
        matched_packs = _fuzzy_match(query, [str(p) for p in all_pack_names])
        matched_snippets = _fuzzy_match(query, [str(sn) for sn in all_snippet_names])
        matched_aliases = _fuzzy_match(query, [str(a) for a in all_alias_names])

        total_matches = len(matched_tools) + len(matched_packs) + len(matched_snippets) + len(matched_aliases)
        s.add("matches", total_matches)

        # Show search results - info parameter controls detail level
        tools_results = [t for t in tools(info=info) if _item_matches(t, matched_tools)]
        packs_results = [p for p in packs(info=info) if _item_matches(p, matched_packs)]
        snippets_results = [sn for sn in snippets(info=info) if _snippet_matches(sn, matched_snippets)]
        aliases_results = [a for a in aliases(info=info) if _item_matches(a, matched_aliases)]

        return _format_search_results(
            query=query,
            tools_results=tools_results,
            packs_results=packs_results,
            snippets_results=snippets_results,
            aliases_results=aliases_results,
            info=info,
        )
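
help() resolves a query in a fixed order: general overview, exact tool ("pack.tool"), exact pack, "$snippet", exact alias, then fuzzy search across everything. A standalone sketch of that dispatch order over plain collections (the lookup data and returned strings are illustrative, not the package's formatting helpers):

# Illustrative sketch, not part of the packaged source.
def resolve(query: str, tools: set[str], packs: set[str],
            snippets: set[str], aliases: dict[str, str]) -> str:
    if not query:
        return "general overview"
    if "." in query and query in tools:          # exact tool match first
        return f"tool help: {query}"
    if query in packs:                           # then exact pack match
        return f"pack help: {query}"
    if query.startswith("$") and query[1:] in snippets:
        return f"snippet help: {query[1:]}"
    if query in aliases:                         # then exact alias match
        return f"alias help: {query} -> {aliases[query]}"
    return "fuzzy search across tools, packs, snippets, aliases"

print(resolve("brave.search", {"brave.search"}, {"brave"}, set(), {}))  # tool help
print(resolve("$b_q", set(), set(), {"b_q"}, {}))                       # snippet help
print(resolve("web fetch", set(), set(), set(), {}))                    # falls through to search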