onetool-mcp 1.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132) hide show
  1. bench/__init__.py +5 -0
  2. bench/cli.py +69 -0
  3. bench/harness/__init__.py +66 -0
  4. bench/harness/client.py +692 -0
  5. bench/harness/config.py +397 -0
  6. bench/harness/csv_writer.py +109 -0
  7. bench/harness/evaluate.py +512 -0
  8. bench/harness/metrics.py +283 -0
  9. bench/harness/runner.py +899 -0
  10. bench/py.typed +0 -0
  11. bench/reporter.py +629 -0
  12. bench/run.py +487 -0
  13. bench/secrets.py +101 -0
  14. bench/utils.py +16 -0
  15. onetool/__init__.py +4 -0
  16. onetool/cli.py +391 -0
  17. onetool/py.typed +0 -0
  18. onetool_mcp-1.0.0b1.dist-info/METADATA +163 -0
  19. onetool_mcp-1.0.0b1.dist-info/RECORD +132 -0
  20. onetool_mcp-1.0.0b1.dist-info/WHEEL +4 -0
  21. onetool_mcp-1.0.0b1.dist-info/entry_points.txt +3 -0
  22. onetool_mcp-1.0.0b1.dist-info/licenses/LICENSE.txt +687 -0
  23. onetool_mcp-1.0.0b1.dist-info/licenses/NOTICE.txt +64 -0
  24. ot/__init__.py +37 -0
  25. ot/__main__.py +6 -0
  26. ot/_cli.py +107 -0
  27. ot/_tui.py +53 -0
  28. ot/config/__init__.py +46 -0
  29. ot/config/defaults/bench.yaml +4 -0
  30. ot/config/defaults/diagram-templates/api-flow.mmd +33 -0
  31. ot/config/defaults/diagram-templates/c4-context.puml +30 -0
  32. ot/config/defaults/diagram-templates/class-diagram.mmd +87 -0
  33. ot/config/defaults/diagram-templates/feature-mindmap.mmd +70 -0
  34. ot/config/defaults/diagram-templates/microservices.d2 +81 -0
  35. ot/config/defaults/diagram-templates/project-gantt.mmd +37 -0
  36. ot/config/defaults/diagram-templates/state-machine.mmd +42 -0
  37. ot/config/defaults/onetool.yaml +25 -0
  38. ot/config/defaults/prompts.yaml +97 -0
  39. ot/config/defaults/servers.yaml +7 -0
  40. ot/config/defaults/snippets.yaml +4 -0
  41. ot/config/defaults/tool_templates/__init__.py +7 -0
  42. ot/config/defaults/tool_templates/extension.py +52 -0
  43. ot/config/defaults/tool_templates/isolated.py +61 -0
  44. ot/config/dynamic.py +121 -0
  45. ot/config/global_templates/__init__.py +2 -0
  46. ot/config/global_templates/bench-secrets-template.yaml +6 -0
  47. ot/config/global_templates/bench.yaml +9 -0
  48. ot/config/global_templates/onetool.yaml +27 -0
  49. ot/config/global_templates/secrets-template.yaml +44 -0
  50. ot/config/global_templates/servers.yaml +18 -0
  51. ot/config/global_templates/snippets.yaml +235 -0
  52. ot/config/loader.py +1087 -0
  53. ot/config/mcp.py +145 -0
  54. ot/config/secrets.py +190 -0
  55. ot/config/tool_config.py +125 -0
  56. ot/decorators.py +116 -0
  57. ot/executor/__init__.py +35 -0
  58. ot/executor/base.py +16 -0
  59. ot/executor/fence_processor.py +83 -0
  60. ot/executor/linter.py +142 -0
  61. ot/executor/pack_proxy.py +260 -0
  62. ot/executor/param_resolver.py +140 -0
  63. ot/executor/pep723.py +288 -0
  64. ot/executor/result_store.py +369 -0
  65. ot/executor/runner.py +496 -0
  66. ot/executor/simple.py +163 -0
  67. ot/executor/tool_loader.py +396 -0
  68. ot/executor/validator.py +398 -0
  69. ot/executor/worker_pool.py +388 -0
  70. ot/executor/worker_proxy.py +189 -0
  71. ot/http_client.py +145 -0
  72. ot/logging/__init__.py +37 -0
  73. ot/logging/config.py +315 -0
  74. ot/logging/entry.py +213 -0
  75. ot/logging/format.py +188 -0
  76. ot/logging/span.py +349 -0
  77. ot/meta.py +1555 -0
  78. ot/paths.py +453 -0
  79. ot/prompts.py +218 -0
  80. ot/proxy/__init__.py +21 -0
  81. ot/proxy/manager.py +396 -0
  82. ot/py.typed +0 -0
  83. ot/registry/__init__.py +189 -0
  84. ot/registry/models.py +57 -0
  85. ot/registry/parser.py +269 -0
  86. ot/registry/registry.py +413 -0
  87. ot/server.py +315 -0
  88. ot/shortcuts/__init__.py +15 -0
  89. ot/shortcuts/aliases.py +87 -0
  90. ot/shortcuts/snippets.py +258 -0
  91. ot/stats/__init__.py +35 -0
  92. ot/stats/html.py +250 -0
  93. ot/stats/jsonl_writer.py +283 -0
  94. ot/stats/reader.py +354 -0
  95. ot/stats/timing.py +57 -0
  96. ot/support.py +63 -0
  97. ot/tools.py +114 -0
  98. ot/utils/__init__.py +81 -0
  99. ot/utils/batch.py +161 -0
  100. ot/utils/cache.py +120 -0
  101. ot/utils/deps.py +403 -0
  102. ot/utils/exceptions.py +23 -0
  103. ot/utils/factory.py +179 -0
  104. ot/utils/format.py +65 -0
  105. ot/utils/http.py +202 -0
  106. ot/utils/platform.py +45 -0
  107. ot/utils/sanitize.py +130 -0
  108. ot/utils/truncate.py +69 -0
  109. ot_tools/__init__.py +4 -0
  110. ot_tools/_convert/__init__.py +12 -0
  111. ot_tools/_convert/excel.py +279 -0
  112. ot_tools/_convert/pdf.py +254 -0
  113. ot_tools/_convert/powerpoint.py +268 -0
  114. ot_tools/_convert/utils.py +358 -0
  115. ot_tools/_convert/word.py +283 -0
  116. ot_tools/brave_search.py +604 -0
  117. ot_tools/code_search.py +736 -0
  118. ot_tools/context7.py +495 -0
  119. ot_tools/convert.py +614 -0
  120. ot_tools/db.py +415 -0
  121. ot_tools/diagram.py +1604 -0
  122. ot_tools/diagram.yaml +167 -0
  123. ot_tools/excel.py +1372 -0
  124. ot_tools/file.py +1348 -0
  125. ot_tools/firecrawl.py +732 -0
  126. ot_tools/grounding_search.py +646 -0
  127. ot_tools/package.py +604 -0
  128. ot_tools/py.typed +0 -0
  129. ot_tools/ripgrep.py +544 -0
  130. ot_tools/scaffold.py +471 -0
  131. ot_tools/transform.py +213 -0
  132. ot_tools/web_fetch.py +384 -0
ot_tools/scaffold.py ADDED
@@ -0,0 +1,471 @@
1
+ """Extension scaffolding tools.
2
+
3
+ Provides tools for creating new tools from templates.
4
+
5
+ Templates:
6
+ - extension: In-process tool with full onetool access (default, recommended)
7
+ - isolated: Subprocess tool with PEP 723 dependencies, fully standalone
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import ast
13
+ import re
14
+ from pathlib import Path
15
+
16
+ from ot.logging import LogSpan
17
+ from ot.paths import get_global_dir, get_project_dir
18
+
19
+ # Pack for dot notation: scaffold.create(), scaffold.templates(), scaffold.list()
20
+ pack = "scaffold"
21
+
22
+ __all__ = ["create", "extensions", "templates", "validate"]
23
+
24
+
25
def _get_templates_dir() -> Path:
    """Return the directory holding the bundled extension templates."""
    # Imported lazily to avoid a module-level dependency at import time.
    from ot.paths import get_bundled_config_dir

    bundled = get_bundled_config_dir()
    return bundled / "tool_templates"
30
+
31
+
32
def templates() -> str:
    """List available extension templates.

    Returns:
        Formatted list of templates with descriptions

    Example:
        scaffold.templates()
    """
    with LogSpan(span="scaffold.templates") as s:
        templates_dir = _get_templates_dir()

        if not templates_dir.exists():
            s.add(error="templates_dir_missing")
            return "Error: Templates directory not found"

        # Fix: don't shadow this function's own name with the local list.
        found: list[dict[str, str]] = []
        for template_file in templates_dir.glob("*.py"):
            # Private templates (leading underscore) are not user-facing.
            if template_file.name.startswith("_"):
                continue

            # Use the first real line of the module docstring as the
            # description; lines containing {{...}} are template placeholders.
            content = template_file.read_text()
            docstring = ""
            # Fix: the previous '"""' containment pre-check was redundant —
            # re.search simply returns None when no docstring is present.
            match = re.search(r'"""(.*?)"""', content, re.DOTALL)
            if match:
                for raw in match.group(1).strip().split("\n"):
                    candidate = raw.strip()
                    if candidate and "{{" not in candidate:
                        docstring = candidate
                        break

            found.append({
                "name": template_file.stem,
                "description": docstring or "No description",
            })

        if not found:
            return "No templates found"

        lines = ["Available extension templates:", ""]
        for t in found:
            lines.append(f" {t['name']}")
            lines.append(f" {t['description']}")
            lines.append("")

        lines.append("Use scaffold.create() to create a new extension from a template.")
        s.add(count=len(found))
        return "\n".join(lines)
84
+
85
+
86
def create(
    *,
    name: str,
    template: str = "extension",
    pack_name: str | None = None,
    function: str = "run",
    description: str = "My extension tool",
    function_description: str = "Execute the tool function",
    api_key: str = "MY_API_KEY",
    scope: str = "project",
) -> str:
    """Create a new extension tool from a template.

    Creates a new extension in .onetool/tools/{name}/{name}.py or
    ~/.onetool/tools/{name}/{name}.py depending on scope.

    Args:
        name: Extension name (will be used as directory and file name)
        template: Template name - "extension" (default, in-process) or "isolated" (subprocess)
        pack_name: Pack name for dot notation (default: same as name)
        function: Main function name (default: run)
        description: Module description
        function_description: Function docstring description
        api_key: API key secret name (for optional API configuration)
        scope: Where to create - "project" (default) or "global"

    Returns:
        Success message with instructions, or error message

    Example:
        scaffold.create(name="my_tool", function="search")
        scaffold.create(name="numpy_tool", template="isolated")
    """
    with LogSpan(span="scaffold.create", name=name, template=template) as s:
        # Validate name (it becomes both a directory and a module name).
        if not re.match(r"^[a-z][a-z0-9_]*$", name):
            return "Error: Name must be lowercase alphanumeric with underscores, starting with a letter"

        # Robustness fix: previously any unknown scope value silently fell
        # through to "project"; reject it explicitly instead.
        if scope not in ("project", "global"):
            return f"Error: Invalid scope '{scope}'. Use 'project' or 'global'"

        # Get templates directory
        templates_dir = _get_templates_dir()
        template_file = templates_dir / f"{template}.py"

        if not template_file.exists():
            available = [f.stem for f in templates_dir.glob("*.py") if not f.name.startswith("_")]
            return f"Error: Template '{template}' not found. Available: {', '.join(available)}"

        # Determine output directory
        if scope == "global":
            base_dir = get_global_dir() / "tools"
        else:
            project_dir = get_project_dir()
            if not project_dir:
                # Create .onetool if it doesn't exist
                from ot.paths import ensure_project_dir
                project_dir = ensure_project_dir(quiet=True)
            base_dir = project_dir / "tools"

        ext_dir = base_dir / name
        ext_file = ext_dir / f"{name}.py"

        # Never overwrite an existing extension.
        if ext_file.exists():
            return f"Error: Extension already exists at {ext_file}"

        # Read and process template
        content = template_file.read_text()

        # Fix: renamed from `pack` — that name shadowed the module-level
        # `pack` marker used for tool discovery.
        resolved_pack = pack_name or name
        replacements = {
            "{{pack}}": resolved_pack,
            "{{function}}": function,
            "{{description}}": description,
            "{{function_description}}": function_description,
            "{{API_KEY}}": api_key,
        }

        for placeholder, value in replacements.items():
            content = content.replace(placeholder, value)

        # Create directory and write file
        ext_dir.mkdir(parents=True, exist_ok=True)
        ext_file.write_text(content)

        s.add(path=str(ext_file), scope=scope)

        # Build helpful output with next steps
        lines = [
            f"Created extension: {ext_file}",
            "",
            "Next steps:",
            " 1. Edit the file to implement your logic",
            f" 2. Validate before reload: scaffold.validate(path=\"{ext_file}\")",
            " 3. Reload to activate: ot.reload()",
            f" 4. Use your tool: {resolved_pack}.{function}()",
        ]
        return "\n".join(lines)
183
+
184
+
185
def extensions() -> str:
    """List extension tools loaded from tools_dir config.

    Shows all extension tool files currently loaded, with full paths.
    Tools are loaded from paths specified in the tools_dir config.

    Returns:
        Formatted list of loaded extension files

    Example:
        scaffold.extensions()
    """
    with LogSpan(span="scaffold.extensions") as s:
        from ot.config.loader import get_config

        cfg = get_config()
        if cfg is None:
            s.add(error="no_config")
            return "No configuration loaded"

        files = cfg.get_tool_files()
        if not files:
            s.add(count=0)
            return "No extensions loaded. Create one with scaffold.create()"

        # Render one indented line per file, sorted for stable output.
        out = ["Loaded extensions:", ""]
        out.extend(f" {p}" for p in sorted(files))
        out.extend(["", f"Total: {len(files)} files"])

        s.add(count=len(files))
        return "\n".join(out)
220
+
221
+
222
+ def _has_pep723_deps(content: str) -> bool:
223
+ """Check if content has PEP 723 script metadata with dependencies."""
224
+ if "# /// script" not in content:
225
+ return False
226
+ # Look for dependencies line in the script block
227
+ in_script_block = False
228
+ for line in content.split("\n"):
229
+ if line.strip() == "# /// script":
230
+ in_script_block = True
231
+ elif line.strip() == "# ///":
232
+ in_script_block = False
233
+ elif in_script_block and "dependencies" in line:
234
+ return True
235
+ return False
236
+
237
+
238
def _check_best_practices(
    content: str, tree: ast.Module, *, is_isolated: bool = False
) -> tuple[dict[str, bool], list[str]]:
    """Check for best practices violations.

    Each check records a named boolean in ``checks`` (consumed by
    ``validate()`` to render its checklist) and, on failure, appends a
    human-readable warning string.

    Args:
        content: The file content
        tree: The parsed AST
        is_isolated: Whether this is an isolated tool (skips logging check)

    Returns:
        Tuple of (checks dict, warnings list)
    """
    checks: dict[str, bool] = {}
    warnings: list[str] = []
    lines = content.split("\n")

    # Check for module docstring
    has_docstring = ast.get_docstring(tree) is not None
    checks["module_docstring"] = has_docstring
    if not has_docstring:
        warnings.append("Best practice: Add a module docstring describing the tool")

    # Check for from __future__ import annotations
    # NOTE(review): plain substring match — a commented-out import would also
    # satisfy this check.
    has_future_annotations = "from __future__ import annotations" in content
    checks["future_annotations"] = has_future_annotations
    if not has_future_annotations:
        warnings.append("Best practice: Add 'from __future__ import annotations' for forward compatibility")

    # Find line numbers of key elements (1-based, first occurrence only).
    pack_line: int | None = None
    first_import_line: int | None = None

    for i, line in enumerate(lines, 1):
        if line.startswith("pack = ") and pack_line is None:
            pack_line = i
        # "from __future__" imports are exempt: they must be first in a file.
        if (line.startswith("import ") or line.startswith("from ")) and first_import_line is None and "from __future__" not in line:
            first_import_line = i

    # Check: pack before imports
    # Passes when either element is missing, or when pack precedes imports.
    pack_before_imports = not (pack_line and first_import_line and pack_line > first_import_line)
    checks["pack_before_imports"] = pack_before_imports
    if not pack_before_imports:
        warnings.append("Best practice: 'pack = \"name\"' should appear before imports")

    # Check for LogSpan or log usage (skip for isolated tools - they can't use onetool logging)
    if is_isolated:
        checks["log_usage"] = True  # N/A for isolated tools
    else:
        has_log_usage = "LogSpan" in content or "with log(" in content
        checks["log_usage"] = has_log_usage
        if not has_log_usage:
            warnings.append("Best practice: Consider using LogSpan or log() for observability")

    # Check for raise statements (should prefer return error strings)
    has_raise = any(isinstance(node, ast.Raise) for node in ast.walk(tree))
    checks["no_raise"] = not has_raise
    if has_raise:
        warnings.append("Best practice: Consider returning error strings instead of raising exceptions")

    # Check for keyword-only args in exported functions
    exported_funcs = _get_exported_functions(tree)
    all_kwonly = True
    for func in exported_funcs:
        if not func.args.kwonlyargs and func.args.args:
            # Has positional args but no keyword-only args
            all_kwonly = False
            break
    checks["keyword_only_args"] = all_kwonly
    if not all_kwonly:
        warnings.append("Best practice: Use keyword-only args (*, param) for API clarity")

    # Check for complete docstrings (Args, Returns, Example)
    docstring_complete = True
    for func in exported_funcs:
        docstring = ast.get_docstring(func)
        if docstring:
            # An Args section is only required when the function actually
            # takes keyword-only arguments.
            has_args = "Args:" in docstring or not func.args.kwonlyargs
            has_returns = "Returns:" in docstring or "Return:" in docstring
            has_example = "Example:" in docstring or "Examples:" in docstring
            if not (has_args and has_returns and has_example):
                docstring_complete = False
                break
        else:
            # Missing docstring entirely counts as incomplete.
            docstring_complete = False
            break
    checks["docstring_complete"] = docstring_complete
    if not docstring_complete:
        warnings.append("Best practice: Docstrings should have Args, Returns, and Example sections")

    return checks, warnings
329
+
330
+
331
+ def _get_exported_functions(tree: ast.Module) -> list[ast.FunctionDef]:
332
+ """Get functions that are exported via __all__."""
333
+ # Find __all__ list
334
+ all_names: set[str] = set()
335
+ for node in ast.walk(tree):
336
+ if isinstance(node, ast.Assign):
337
+ for target in node.targets:
338
+ if isinstance(target, ast.Name) and target.id == "__all__" and isinstance(node.value, ast.List):
339
+ for elt in node.value.elts:
340
+ if isinstance(elt, ast.Constant) and isinstance(elt.value, str):
341
+ all_names.add(elt.value)
342
+
343
+ # Find exported functions
344
+ funcs: list[ast.FunctionDef] = []
345
+ for node in tree.body:
346
+ if isinstance(node, ast.FunctionDef) and node.name in all_names:
347
+ funcs.append(node)
348
+ return funcs
349
+
350
+
351
def validate(*, path: str) -> str:
    """Validate an extension before reload.

    Checks Python syntax, required structure, and best practices.

    Args:
        path: Full path to the extension file

    Returns:
        Validation result with any errors or warnings

    Example:
        scaffold.validate(path="/path/to/extension.py")
    """
    with LogSpan(span="scaffold.validate", path=path) as s:
        ext_path = Path(path)

        if not ext_path.exists():
            s.add(error="file_not_found")
            return f"Error: File not found: {path}"

        if ext_path.suffix != ".py":
            s.add(error="not_python_file")
            return f"Error: Not a Python file: {path}"

        try:
            content = ext_path.read_text()
        except Exception as e:
            s.add(error=str(e))
            return f"Error reading file: {e}"

        errors: list[str] = []
        warnings: list[str] = []

        # Check 1: Python syntax
        try:
            tree = ast.parse(content)
        except SyntaxError as e:
            s.add(error="syntax_error")
            return f"Syntax error at line {e.lineno}: {e.msg}"

        # Check 2: Required structure - pack variable
        has_pack = any(
            isinstance(node, ast.Assign)
            and any(isinstance(t, ast.Name) and t.id == "pack" for t in node.targets)
            for node in ast.walk(tree)
        )
        if not has_pack:
            errors.append("Missing 'pack = \"name\"' variable for tool discovery")

        # Check 3: Required structure - __all__ variable
        has_all = any(
            isinstance(node, ast.Assign)
            and any(isinstance(t, ast.Name) and t.id == "__all__" for t in node.targets)
            for node in ast.walk(tree)
        )
        if not has_all:
            errors.append("Missing '__all__ = [...]' export list")

        # Check 4: Isolated tools (PEP 723) need inline JSON-RPC loop.
        # Fix: these pure string probes were previously recomputed up to
        # three times (here and again when rendering the checklist below);
        # compute them once.
        is_isolated = _has_pep723_deps(content)
        has_json_rpc = (
            'if __name__ == "__main__":' in content and "json.loads" in content
        )
        if is_isolated and not has_json_rpc:
            errors.append("Missing inline JSON-RPC loop - required for isolated tools with PEP 723 dependencies")

        # Check 5: Best practices
        checks, bp_warnings = _check_best_practices(content, tree, is_isolated=is_isolated)
        warnings.extend(bp_warnings)

        # Check 6: Warn about deprecated ot_sdk imports
        if "from ot_sdk" in content or "import ot_sdk" in content:
            warnings.append("DEPRECATED: ot_sdk imports are deprecated. Use ot.* imports for extension tools, or inline JSON-RPC for isolated tools")

        # Build result showing what passed and failed
        result: list[str] = []

        # Show check results
        result.append("Checks:")
        result.append(f" [{'x' if has_pack else ' '}] pack = \"name\" variable")
        result.append(f" [{'x' if has_all else ' '}] __all__ export list")
        if is_isolated:
            result.append(f" [{'x' if has_json_rpc else ' '}] inline JSON-RPC loop (isolated)")
        result.append(" [x] Python syntax valid")
        result.append(f" [{'x' if checks.get('module_docstring', True) else ' '}] module docstring")
        result.append(f" [{'x' if checks.get('future_annotations', True) else ' '}] from __future__ import annotations")
        result.append(f" [{'x' if checks.get('pack_before_imports', True) else ' '}] pack before imports")
        result.append(f" [{'x' if checks.get('keyword_only_args', True) else ' '}] keyword-only args")
        result.append(f" [{'x' if checks.get('docstring_complete', True) else ' '}] complete docstrings")
        result.append(f" [{'x' if checks.get('log_usage', True) else ' '}] logging usage")
        result.append(f" [{'x' if checks.get('no_raise', True) else ' '}] returns errors (no raise)")

        if errors:
            s.add(valid=False, errors=len(errors), warnings=len(warnings))
            result.insert(0, "Validation FAILED")
            result.insert(1, "")
            result.append("")
            result.append("Errors:")
            for err in errors:
                result.append(f" - {err}")
            if warnings:
                result.append("")
                result.append("Warnings:")
                for warn in warnings:
                    result.append(f" - {warn}")
            return "\n".join(result)

        s.add(valid=True, warnings=len(warnings))
        result.insert(0, "Validation PASSED")
        result.insert(1, "")

        if warnings:
            result.append("")
            result.append("Warnings:")
            for warn in warnings:
                result.append(f" - {warn}")

        result.append("")
        result.append("Ready to reload: ot.reload()")
        return "\n".join(result)
ot_tools/transform.py ADDED
@@ -0,0 +1,213 @@
1
+ """Transform - LLM-powered data transformation.
2
+
3
+ Takes input data and a prompt, uses an LLM to transform/process it.
4
+
5
+ Example:
6
+ llm.transform(
7
+ brave.search(query="metal prices", count=10),
8
+ prompt="Extract prices as YAML with fields: metal, price, unit, url",
9
+ )
10
+
11
+ Supports OpenAI API and OpenRouter (OpenAI-compatible).
12
+
13
+ **Requires configuration:**
14
+ - OPENAI_API_KEY in secrets.yaml
15
+ - transform.base_url in onetool.yaml (e.g., https://openrouter.ai/api/v1)
16
+ - transform.model in onetool.yaml (e.g., openai/gpt-5-mini)
17
+
18
+ Tool is not available until all three are configured.
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ # Pack for dot notation: llm.transform()
24
+ pack = "llm"
25
+
26
+ __all__ = ["transform"]
27
+
28
+ # Dependency declarations for CLI validation
29
+ __ot_requires__ = {
30
+ "lib": [("openai", "pip install openai")],
31
+ "secrets": ["OPENAI_API_KEY"],
32
+ }
33
+
34
+ from typing import Any
35
+
36
+ from openai import OpenAI
37
+ from pydantic import BaseModel, Field
38
+
39
+ from ot.config import get_secret, get_tool_config
40
+ from ot.logging import LogSpan
41
+
42
+
43
class Config(BaseModel):
    """Pack configuration - discovered by registry.

    Settings are read from the `transform` section of onetool.yaml via
    get_tool_config(). Empty-string defaults mean "not configured" —
    _get_api_config() normalizes them to None.
    """

    # OpenAI-compatible endpoint; empty means the tool is unavailable.
    base_url: str = Field(
        default="",
        description="OpenAI-compatible API base URL (e.g., https://openrouter.ai/api/v1)",
    )
    # Default model when the caller does not pass one explicitly.
    model: str = Field(
        default="",
        description="Model to use for transformation (e.g., openai/gpt-4o-mini)",
    )
    # Passed straight to the OpenAI client constructor.
    timeout: int = Field(
        default=30,
        description="API timeout in seconds",
    )
    # None omits max_tokens from the API call entirely.
    max_tokens: int | None = Field(
        default=None,
        description="Maximum tokens in response (None=no limit)",
    )
62
+
63
+
64
def _get_config() -> Config:
    """Return the `transform` pack configuration."""
    cfg: Config = get_tool_config("transform", Config)
    return cfg
67
+
68
+
69
def _get_api_config() -> tuple[str | None, str | None, str | None, Config]:
    """Get API configuration from settings.

    Returns:
        Tuple of (api_key, base_url, default_model, config) - api_key/base_url/model
        are None if not configured
    """
    cfg = _get_config()
    key = get_secret("OPENAI_API_KEY")
    # Empty-string config values are normalized to None ("not configured").
    url = cfg.base_url if cfg.base_url else None
    model_name = cfg.model if cfg.model else None
    return key, url, model_name, cfg
81
+
82
+
83
def transform(
    *,
    input: Any,
    prompt: str,
    model: str | None = None,
    json_mode: bool = False,
) -> str:
    """Transform input data using an LLM.

    Takes any input data (typically a string result from another tool call)
    and processes it according to the prompt instructions.

    Args:
        input: Data to transform (will be converted to string if not already)
        prompt: Instructions for how to transform/process the input
        model: AI model to use (uses transform.model from config if not specified)
        json_mode: If True, request JSON output format from the model

    Returns:
        The LLM's response as a string, or error message if not configured

    Examples:
        # Extract structured data from search results
        llm.transform(
            input=brave.search(query="gold price today", count=5),
            prompt="Extract the current gold price in USD/oz as a single number",
        )

        # Convert to YAML format
        llm.transform(
            input=brave.search(query="metal prices", count=10),
            prompt="Return ONLY valid YAML with fields: metal, price, unit, url",
        )

        # Summarize content
        llm.transform(
            input=some_long_text,
            prompt="Summarize this in 3 bullet points"
        )

        # Get JSON output
        llm.transform(
            input=data,
            prompt="Extract name and email as JSON",
            json_mode=True
        )
    """
    with LogSpan(span="llm.transform", promptLen=len(prompt)) as s:
        # Validate inputs
        if not prompt or not prompt.strip():
            s.add(error="empty_prompt")
            return "Error: prompt is required and cannot be empty"

        input_str = str(input)
        if not input_str.strip():
            s.add(error="empty_input")
            return "Error: input is required and cannot be empty"

        s.add(inputLen=len(input_str))

        # Get API config
        api_key, base_url, default_model, config = _get_api_config()

        # Check all three required settings up front.
        # Fix: the model check previously ran only AFTER the OpenAI client
        # was constructed and the message built; fail fast instead.
        if not api_key:
            s.add(error="not_configured")
            return "Error: Transform tool not available. Set OPENAI_API_KEY in secrets.yaml."

        if not base_url:
            s.add(error="no_base_url")
            return (
                "Error: Transform tool not available. Set transform.base_url in config."
            )

        used_model = model or default_model
        if not used_model:
            s.add(error="no_model")
            return "Error: Transform tool not available. Set transform.model in config."

        # Create client with timeout
        client = OpenAI(api_key=api_key, base_url=base_url, timeout=config.timeout)

        # Build the message (f-string body is intentionally flush-left —
        # the indentation would otherwise leak into the prompt text).
        user_message = f"""Input data:
{input_str}

Instructions:
{prompt}"""

        s.add(model=used_model, jsonMode=json_mode)

        try:
            # Build API call kwargs
            api_kwargs: dict[str, Any] = {
                "model": used_model,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are a data transformation assistant. Follow the user's instructions precisely. Output ONLY the requested format, no explanations.",
                    },
                    {"role": "user", "content": user_message},
                ],
                # Low temperature: transformations should be deterministic.
                "temperature": 0.1,
            }

            if config.max_tokens is not None:
                api_kwargs["max_tokens"] = config.max_tokens

            if json_mode:
                api_kwargs["response_format"] = {"type": "json_object"}

            response = client.chat.completions.create(**api_kwargs)
            result = response.choices[0].message.content or ""
            s.add(outputLen=len(result))

            # Log token usage if available
            if response.usage:
                s.add(
                    inputTokens=response.usage.prompt_tokens,
                    outputTokens=response.usage.completion_tokens,
                    totalTokens=response.usage.total_tokens,
                )

            return result
        except Exception as e:
            error_msg = str(e)
            # Sanitize sensitive info from error messages
            if "api_key" in error_msg.lower() or "sk-" in error_msg:
                error_msg = "Authentication error - check OPENAI_API_KEY in secrets.yaml"
            s.add(error=error_msg)
            return f"Error: {error_msg}"