lm-deluge 0.0.67__py3-none-any.whl → 0.0.88__py3-none-any.whl
- lm_deluge/__init__.py +25 -2
- lm_deluge/api_requests/anthropic.py +92 -17
- lm_deluge/api_requests/base.py +47 -11
- lm_deluge/api_requests/bedrock.py +7 -4
- lm_deluge/api_requests/chat_reasoning.py +4 -0
- lm_deluge/api_requests/gemini.py +138 -18
- lm_deluge/api_requests/openai.py +114 -21
- lm_deluge/client.py +282 -49
- lm_deluge/config.py +15 -3
- lm_deluge/mock_openai.py +643 -0
- lm_deluge/models/__init__.py +12 -1
- lm_deluge/models/anthropic.py +17 -2
- lm_deluge/models/arcee.py +16 -0
- lm_deluge/models/deepseek.py +36 -4
- lm_deluge/models/google.py +29 -0
- lm_deluge/models/grok.py +24 -0
- lm_deluge/models/kimi.py +36 -0
- lm_deluge/models/minimax.py +10 -0
- lm_deluge/models/openai.py +100 -0
- lm_deluge/models/openrouter.py +86 -8
- lm_deluge/models/together.py +11 -0
- lm_deluge/models/zai.py +1 -0
- lm_deluge/pipelines/gepa/__init__.py +95 -0
- lm_deluge/pipelines/gepa/core.py +354 -0
- lm_deluge/pipelines/gepa/docs/samples.py +696 -0
- lm_deluge/pipelines/gepa/examples/01_synthetic_keywords.py +140 -0
- lm_deluge/pipelines/gepa/examples/02_gsm8k_math.py +261 -0
- lm_deluge/pipelines/gepa/examples/03_hotpotqa_multihop.py +300 -0
- lm_deluge/pipelines/gepa/examples/04_batch_classification.py +271 -0
- lm_deluge/pipelines/gepa/examples/simple_qa.py +129 -0
- lm_deluge/pipelines/gepa/optimizer.py +435 -0
- lm_deluge/pipelines/gepa/proposer.py +235 -0
- lm_deluge/pipelines/gepa/util.py +165 -0
- lm_deluge/{llm_tools → pipelines}/score.py +2 -2
- lm_deluge/{llm_tools → pipelines}/translate.py +5 -3
- lm_deluge/prompt.py +224 -40
- lm_deluge/request_context.py +7 -2
- lm_deluge/tool/__init__.py +1118 -0
- lm_deluge/tool/builtin/anthropic/__init__.py +300 -0
- lm_deluge/tool/builtin/gemini.py +59 -0
- lm_deluge/tool/builtin/openai.py +74 -0
- lm_deluge/tool/cua/__init__.py +173 -0
- lm_deluge/tool/cua/actions.py +148 -0
- lm_deluge/tool/cua/base.py +27 -0
- lm_deluge/tool/cua/batch.py +215 -0
- lm_deluge/tool/cua/converters.py +466 -0
- lm_deluge/tool/cua/kernel.py +702 -0
- lm_deluge/tool/cua/trycua.py +989 -0
- lm_deluge/tool/prefab/__init__.py +45 -0
- lm_deluge/tool/prefab/batch_tool.py +156 -0
- lm_deluge/tool/prefab/docs.py +1119 -0
- lm_deluge/tool/prefab/email.py +294 -0
- lm_deluge/tool/prefab/filesystem.py +1711 -0
- lm_deluge/tool/prefab/full_text_search/__init__.py +285 -0
- lm_deluge/tool/prefab/full_text_search/tantivy_index.py +396 -0
- lm_deluge/tool/prefab/memory.py +458 -0
- lm_deluge/tool/prefab/otc/__init__.py +165 -0
- lm_deluge/tool/prefab/otc/executor.py +281 -0
- lm_deluge/tool/prefab/otc/parse.py +188 -0
- lm_deluge/tool/prefab/random.py +212 -0
- lm_deluge/tool/prefab/rlm/__init__.py +296 -0
- lm_deluge/tool/prefab/rlm/executor.py +349 -0
- lm_deluge/tool/prefab/rlm/parse.py +144 -0
- lm_deluge/tool/prefab/sandbox.py +1621 -0
- lm_deluge/tool/prefab/sheets.py +385 -0
- lm_deluge/tool/prefab/subagents.py +233 -0
- lm_deluge/tool/prefab/todos.py +342 -0
- lm_deluge/tool/prefab/tool_search.py +169 -0
- lm_deluge/tool/prefab/web_search.py +199 -0
- lm_deluge/tracker.py +16 -13
- lm_deluge/util/schema.py +412 -0
- lm_deluge/warnings.py +8 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/METADATA +22 -9
- lm_deluge-0.0.88.dist-info/RECORD +117 -0
- lm_deluge/built_in_tools/anthropic/__init__.py +0 -128
- lm_deluge/built_in_tools/openai.py +0 -28
- lm_deluge/presets/cerebras.py +0 -17
- lm_deluge/presets/meta.py +0 -13
- lm_deluge/tool.py +0 -849
- lm_deluge-0.0.67.dist-info/RECORD +0 -72
- lm_deluge/{llm_tools → pipelines}/__init__.py +1 -1
- /lm_deluge/{llm_tools → pipelines}/classify.py +0 -0
- /lm_deluge/{llm_tools → pipelines}/extract.py +0 -0
- /lm_deluge/{llm_tools → pipelines}/locate.py +0 -0
- /lm_deluge/{llm_tools → pipelines}/ocr.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/anthropic/bash.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/anthropic/computer_use.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/anthropic/editor.py +0 -0
- /lm_deluge/{built_in_tools → tool/builtin}/base.py +0 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/top_level.txt +0 -0
lm_deluge/tool/__init__.py
@@ -0,0 +1,1118 @@
import asyncio
import inspect
from concurrent.futures import ThreadPoolExecutor
from functools import lru_cache
from typing import (
    Annotated,
    Any,
    Callable,
    Coroutine,
    Literal,
    TypedDict,
    get_args,
    get_origin,
    get_type_hints,
)

from fastmcp import Client  # pip install fastmcp >= 2.0
from mcp.types import Tool as MCPTool
from pydantic import BaseModel, Field, TypeAdapter, field_validator

from lm_deluge.image import Image
from lm_deluge.prompt import Text, ToolResultPart


@lru_cache(maxsize=1000)
def _get_cached_typeadapter(cls: type | Callable) -> TypeAdapter:
    """
    Cache TypeAdapters since they're expensive to create.
    For functions, we also handle Annotated[T, "string"] -> Annotated[T, Field(description="string")].
    """
    if inspect.isfunction(cls) or inspect.ismethod(cls):
        if hasattr(cls, "__annotations__") and cls.__annotations__:
            try:
                resolved_hints = get_type_hints(cls, include_extras=True)
            except Exception:
                resolved_hints = cls.__annotations__

            # Convert Annotated[T, "string"] to Annotated[T, Field(description="string")]
            processed_hints = {}
            for name, annotation in resolved_hints.items():
                if (
                    get_origin(annotation) is Annotated
                    and len(get_args(annotation)) == 2
                    and isinstance(get_args(annotation)[1], str)
                ):
                    base_type, description = get_args(annotation)
                    processed_hints[name] = Annotated[
                        base_type, Field(description=description)
                    ]
                else:
                    processed_hints[name] = annotation

            # Create new function with processed annotations if changed
            if processed_hints != cls.__annotations__:
                import types

                if inspect.ismethod(cls):
                    actual_func = cls.__func__
                    code = actual_func.__code__
                    globals_dict = actual_func.__globals__
                    name = actual_func.__name__
                    defaults = actual_func.__defaults__
                    kwdefaults = actual_func.__kwdefaults__
                    closure = actual_func.__closure__
                else:
                    code = cls.__code__
                    globals_dict = cls.__globals__
                    name = cls.__name__
                    defaults = cls.__defaults__
                    kwdefaults = cls.__kwdefaults__
                    closure = cls.__closure__

                new_func = types.FunctionType(
                    code,
                    globals_dict,
                    name,
                    defaults,
                    closure,
                )
                if kwdefaults is not None:
                    new_func.__kwdefaults__ = kwdefaults
                new_func.__dict__.update(cls.__dict__)
                new_func.__module__ = cls.__module__
                new_func.__qualname__ = getattr(cls, "__qualname__", cls.__name__)
                new_func.__annotations__ = processed_hints

                if inspect.ismethod(cls):
                    new_method = types.MethodType(new_func, cls.__self__)
                    return TypeAdapter(new_method)
                else:
                    return TypeAdapter(new_func)

    return TypeAdapter(cls)


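# Illustrative sketch (not part of the package source): the rewriting above
# lets a plain string inside Annotated act as a parameter description, e.g.
#
#     def search(query: Annotated[str, "What to search for"], limit: int = 10): ...
#
# is treated as if it had been written:
#
#     def search(query: Annotated[str, Field(description="What to search for")], limit: int = 10): ...

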
def _clean_schema(
    schema: dict[str, Any],
    *,
    prune_titles: bool = True,
    prune_additional_properties: bool = True,
) -> dict[str, Any]:
    """
    Clean up a JSON schema by removing titles and additionalProperties: false.
    This is applied recursively to all nested schemas.
    """

    def _traverse(node: Any) -> Any:
        if isinstance(node, dict):
            new_node = {}
            for key, value in node.items():
                # Skip titles if pruning
                if prune_titles and key == "title":
                    continue
                # Skip additionalProperties: false if pruning
                if (
                    prune_additional_properties
                    and key == "additionalProperties"
                    and value is False
                ):
                    continue
                new_node[key] = _traverse(value)
            return new_node
        elif isinstance(node, list):
            return [_traverse(item) for item in node]
        else:
            return node

    return _traverse(schema)


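# Illustrative sketch (not part of the package source; assumes a Pydantic v2
# model_json_schema() style input):
#
#     _clean_schema({
#         "title": "SearchParams",
#         "type": "object",
#         "properties": {"query": {"title": "Query", "type": "string"}},
#         "additionalProperties": False,
#     })
#     # -> {"type": "object", "properties": {"query": {"type": "string"}}}

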
def _get_type_hint_string(type_annotation: Any) -> str:
    """
    Get a readable string representation of a type annotation.
    Handles generic types, unions, etc.
    """
    import re

    # Handle None type
    if type_annotation is type(None):
        return "None"

    # For generic types, get_origin and get_args give us the components
    origin = get_origin(type_annotation)
    args = get_args(type_annotation)

    if origin is not None and args:
        # Get the origin name (list, dict, etc.)
        if hasattr(origin, "__name__"):
            origin_name = origin.__name__
        else:
            origin_name = str(origin).replace("typing.", "")

        # Recursively get arg strings
        arg_strs = [_get_type_hint_string(arg) for arg in args]

        # Handle Union types (including | syntax)
        if origin_name in ("Union", "UnionType"):
            return " | ".join(arg_strs)

        return f"{origin_name}[{', '.join(arg_strs)}]"

    # Try to get __name__ for simple types (int, str, custom classes)
    if hasattr(type_annotation, "__name__"):
        return type_annotation.__name__

    # For anything else, use string representation and clean it up
    type_str = str(type_annotation)

    # Remove module prefixes like '__main__.', 'mymodule.', etc.
    type_str = re.sub(r"\b\w+\.", "", type_str)
    # Remove 'typing.' prefix (in case it's still there)
    type_str = type_str.replace("typing.", "")
    # Remove 'typing_extensions.' prefix
    type_str = type_str.replace("typing_extensions.", "")

    return type_str


def _format_output_schema_for_description(
    return_type: Any,
    output_schema: dict[str, Any] | None,
) -> str | None:
    """
    Format output schema information for inclusion in tool description.

    Returns a string like:
    "Returns: list[SearchResult]

    SearchResult: {"properties": {...}, "type": "object"}"

    Or None if there's no meaningful output schema to show.
    """
    import json

    if return_type is None or return_type is inspect.Parameter.empty:
        return None

    # Get the type hint string
    type_hint = _get_type_hint_string(return_type)

    # Start with the return type
    parts = [f"Returns: {type_hint}"]

    # If there are $defs, include them
    if output_schema and "$defs" in output_schema:
        defs = output_schema["$defs"]
        for def_name, def_schema in defs.items():
            # Format the schema compactly (single line)
            schema_str = json.dumps(def_schema, separators=(",", ":"))
            parts.append(f"{def_name}: {schema_str}")

    return "\n\n".join(parts)


def _is_typeddict(cls: Any) -> bool:
    """Check if a class is a TypedDict."""
    return (
        isinstance(cls, type)
        and hasattr(cls, "__annotations__")
        and hasattr(cls, "__total__")
    )


def _normalize_parameters(
    params: Any,
) -> tuple[dict[str, Any], list[str], dict[str, Any] | None]:
    """
    Normalize various parameter input formats to JSON schema components.

    Accepts:
    - None -> empty schema
    - dict with "type" keys (already JSON schema) -> pass through
    - dict mapping names to Python types {name: str, age: int}
    - dict mapping names to (type, extras) tuples {name: (str, {"description": "..."})}
    - Pydantic BaseModel class
    - TypedDict class

    Returns:
        (properties, required, definitions)
    """

    def _schema_from_type(annotation: Any) -> dict[str, Any]:
        """
        Prefer TypeAdapter-based schemas (handles Union/Optional, Annotated, etc).
        Fall back to the legacy mapper if TypeAdapter cannot handle the type.
        """
        try:
            ta = TypeAdapter(annotation)
            return _clean_schema(ta.json_schema())
        except Exception:
            return _python_type_to_json_schema(annotation)

    if params is None:
        return {}, [], None

    # Pydantic model
    if isinstance(params, type) and issubclass(params, BaseModel):
        schema = params.model_json_schema()
        schema = _clean_schema(schema)
        properties = schema.get("properties", {})
        required = schema.get("required", [])
        definitions = schema.get("$defs")
        return properties, required, definitions

    # TypedDict
    if _is_typeddict(params):
        try:
            ta = TypeAdapter(params)
            schema = _clean_schema(ta.json_schema())
            properties = schema.get("properties", {})
            required = schema.get("required", [])
            definitions = schema.get("$defs")
            return properties, required, definitions
        except Exception:
            # Fall back to manual extraction
            hints = get_type_hints(params)
            properties = {}
            required = []
            for field_name, field_type in hints.items():
                properties[field_name] = _python_type_to_json_schema(field_type)
                required.append(field_name)
            return properties, required, None

    # Must be a dict at this point
    if not isinstance(params, dict):
        raise TypeError(
            f"parameters must be a dict, Pydantic model, or TypedDict, "
            f"got {type(params).__name__}"
        )

    # Check if it's already a JSON schema (has "type" keys in values)
    # vs a simple {name: type} mapping
    if params and all(
        isinstance(v, dict) and "type" in v for v in params.values() if v is not None
    ):
        # Already JSON schema format - extract required from presence of "optional" key
        required = [
            name for name, schema in params.items() if not schema.get("optional", False)
        ]
        # Remove "optional" keys as they're not valid JSON schema
        cleaned = {}
        for name, schema in params.items():
            cleaned[name] = {k: v for k, v in schema.items() if k != "optional"}
        return cleaned, required, None

    # Simple {name: type} or {name: (type, extras)} mapping
    properties = {}
    required = []

    for param_name, param_spec in params.items():
        # Tuple of (type, extras)
        if isinstance(param_spec, tuple) and len(param_spec) == 2:
            param_type, extras = param_spec
            if isinstance(extras, dict):
                schema = _schema_from_type(param_type)
                schema.update(extras)
                # Remove "optional" key as it's not valid JSON schema
                is_optional = schema.pop("optional", False)
                properties[param_name] = schema
                if not is_optional:
                    required.append(param_name)
                continue

        # Python type (int, str, list[str], etc.)
        if isinstance(param_spec, type) or get_origin(param_spec) is not None:
            properties[param_name] = _schema_from_type(param_spec)
            required.append(param_name)
            continue

        # Already a JSON schema dict
        if isinstance(param_spec, dict):
            schema = param_spec.copy()
            is_optional = schema.pop("optional", False)
            properties[param_name] = schema
            if not is_optional:
                required.append(param_name)
            continue

        # Unknown - try to convert
        properties[param_name] = _schema_from_type(param_spec)
        required.append(param_name)

    return properties, required, None


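# Illustrative sketch (not part of the package source): both of these inputs
# normalize to the same (properties, required, definitions) triple:
#
#     _normalize_parameters({"query": str, "limit": (int, {"optional": True})})
#     _normalize_parameters({"query": {"type": "string"},
#                            "limit": {"type": "integer", "optional": True}})
#     # -> ({"query": {"type": "string"}, "limit": {"type": "integer"}},
#     #     ["query"], None)

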
def _python_type_to_json_schema(python_type: Any) -> dict[str, Any]:
    """
    Convert Python type annotations to JSON Schema.
    Handles: primitives, NoneType, Literal, list[T], dict[str, T];
    anything else falls back to {"type": "string"}. Union/Optional is
    handled upstream via TypeAdapter in _schema_from_type.
    """
    # Get origin and args for generic types
    origin = get_origin(python_type)
    args = get_args(python_type)

    # Handle None / NoneType
    if origin is type(None) or python_type is type(None):
        return {"type": "null"}

    # Handle Literal["a", "b"] -> enum
    if origin is Literal:
        return {"type": "string", "enum": list(args)}

    # Handle list[T]
    if origin is list:
        if args:
            items_schema = _python_type_to_json_schema(args[0])
            return {"type": "array", "items": items_schema}
        return {"type": "array"}

    # Handle dict[str, T]
    if origin is dict:
        if len(args) >= 2:
            # For dict[str, T], we can set additionalProperties
            value_schema = _python_type_to_json_schema(args[1])
            return {"type": "object", "additionalProperties": value_schema}
        return {"type": "object"}

    # Handle basic types
    if python_type is int:
        return {"type": "integer"}
    elif python_type is float:
        return {"type": "number"}
    elif python_type is str:
        return {"type": "string"}
    elif python_type is bool:
        return {"type": "boolean"}
    elif python_type is list:
        return {"type": "array"}
    elif python_type is dict:
        return {"type": "object"}
    else:
        # Default to string for unknown types
        return {"type": "string"}


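# Illustrative sketch (not part of the package source):
#
#     _python_type_to_json_schema(list[int])
#     # -> {"type": "array", "items": {"type": "integer"}}
#     _python_type_to_json_schema(Literal["asc", "desc"])
#     # -> {"type": "string", "enum": ["asc", "desc"]}
#     _python_type_to_json_schema(dict[str, float])
#     # -> {"type": "object", "additionalProperties": {"type": "number"}}

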
async def _load_all_mcp_tools(client: Client) -> list["Tool"]:
    metas: list[MCPTool] = await client.list_tools()

    def make_runner(name: str):
        async def _async_call(**kw):
            async with client:
                # maybe this should be call_tool_mcp if we don't want errors raised
                raw_result = await client.call_tool(name, kw)

                # for now, just collect all content blocks into a list of result parts
                results = []
                if not isinstance(raw_result, list):  # newer versions of fastmcp
                    content_blocks = raw_result.content
                else:
                    content_blocks = raw_result
                for block in content_blocks:
                    if block.type == "text":
                        results.append(Text(block.text))
                    elif block.type == "image":
                        data_url = f"data:{block.mimeType};base64,{block.data}"
                        results.append(Image(data=data_url))

                return results

        return _async_call

    tools: list[Tool] = []
    for m in metas:
        # Extract definitions from the schema (could be $defs or definitions)
        definitions = m.inputSchema.get("$defs") or m.inputSchema.get("definitions")

        tools.append(
            Tool(
                name=m.name,
                description=m.description,
                parameters=m.inputSchema.get("properties", {}),
                required=m.inputSchema.get("required", []),
                additionalProperties=m.inputSchema.get("additionalProperties"),
                definitions=definitions,
                run=make_runner(m.name),
            )
        )
    return tools


class Tool(BaseModel):
    """
    Provider-agnostic tool definition with no extra nesting.

    The `parameters` argument accepts multiple formats:
    - dict with JSON schema: {"query": {"type": "string"}}
    - dict with Python types: {"query": str, "limit": int}
    - dict with (type, extras) tuples: {"query": (str, {"description": "..."})}
    - Pydantic BaseModel class
    - TypedDict class

    Examples:
        # From JSON schema (traditional)
        Tool(name="search", parameters={"query": {"type": "string"}}, ...)

        # From Python types (simple)
        Tool(name="search", parameters={"query": str, "limit": int}, ...)

        # From Pydantic model
        class SearchParams(BaseModel):
            query: str
            limit: int = 10
        Tool(name="search", parameters=SearchParams, ...)

        # From TypedDict
        class SearchParams(TypedDict):
            query: str
            limit: NotRequired[int]
        Tool(name="search", parameters=SearchParams, ...)

        # From function (recommended for most cases)
        Tool.from_function(my_search_function)
    """

    model_config = {"arbitrary_types_allowed": True}

    name: str
    description: str | None = None
    parameters: dict[str, Any] | None = None
    required: list[str] = Field(default_factory=list)
    additionalProperties: bool | None = None
    # if desired, can provide a callable to run the tool
    run: Callable | None = None
    # for built-in tools that don't require schema
    is_built_in: bool = False
    type: str | None = None
    built_in_args: dict[str, Any] = Field(default_factory=dict)
    # JSON Schema definitions (for $ref support)
    definitions: dict[str, Any] | None = None
    # Output schema (extracted from return type annotation)
    output_schema: dict[str, Any] | None = None
    # TypeAdapter for output validation (not serialized, stored as private attr)
    _output_type_adapter: TypeAdapter | None = None

    def __init__(self, **data):
        # Normalize parameters before passing to Pydantic
        raw_params = data.get("parameters")
        if raw_params is not None:
            properties, required_fields, definitions = _normalize_parameters(raw_params)
            data["parameters"] = properties
            # Only set required if not explicitly provided (check for key presence, not truthiness)
            if "required" not in data:
                data["required"] = required_fields
            # Only set definitions if not explicitly provided and we have new ones
            if definitions and "definitions" not in data:
                data["definitions"] = definitions

        super().__init__(**data)

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str) -> str:
        if v.startswith("_computer_"):
            raise ValueError(
                f"Tool name '{v}' uses reserved prefix '_computer_'. "
                "This prefix is reserved for computer use actions."
            )
        return v

    def _is_async(self) -> bool:
        return inspect.iscoroutinefunction(self.run)

    def _validate_output(self, result: Any) -> Any:
        """Validate output against output_schema if TypeAdapter is available."""
        if self._output_type_adapter is None:
            raise ValueError(
                "Cannot validate output: no output type adapter available. "
                "Make sure the tool was created with from_function() and has a return type annotation."
            )
        # This will raise ValidationError if result doesn't match the schema
        return self._output_type_adapter.validate_python(result)

    def call(
        self, *, validate_output: bool = False, **kwargs
    ) -> str | list[ToolResultPart]:
        """
        Call the tool with the given arguments.

        Args:
            validate_output: If True, validate the return value against the
                output schema. Raises ValidationError if validation fails.
                Requires the tool to have been created with from_function()
                and have a return type annotation.
            **kwargs: Arguments to pass to the tool function.

        Returns:
            The result of the tool function.

        Raises:
            ValueError: If no run function is provided or validation is requested
                but no output type adapter is available.
            pydantic.ValidationError: If validate_output=True and the result
                doesn't match the output schema.
        """
        if self.run is None:
            raise ValueError("No run function provided")

        if self._is_async():
            coro: Coroutine = self.run(**kwargs)  # type: ignore[arg-type]
            try:
                loop = asyncio.get_running_loop()
                assert loop
            except RuntimeError:
                # no loop → safe to block
                result = asyncio.run(coro)
            else:
                # Loop is running → execute coroutine in a worker thread
                def _runner():
                    return asyncio.run(coro)

                with ThreadPoolExecutor(max_workers=1) as executor:
                    result = executor.submit(_runner).result()
        else:
            # plain function
            result = self.run(**kwargs)

        if validate_output:
            self._validate_output(result)

        return result

    async def acall(
        self, *, validate_output: bool = False, **kwargs
    ) -> str | list[ToolResultPart]:
        """
        Async version of call().

        Args:
            validate_output: If True, validate the return value against the
                output schema. Raises ValidationError if validation fails.
            **kwargs: Arguments to pass to the tool function.

        Returns:
            The result of the tool function.
        """
        if self.run is None:
            raise ValueError("No run function provided")

        if self._is_async():
            result = await self.run(**kwargs)  # type: ignore[func-returns-value]
        else:
            loop = asyncio.get_running_loop()
            assert self.run is not None, "can't run None"
            result = await loop.run_in_executor(None, lambda: self.run(**kwargs))  # type: ignore

        if validate_output:
            self._validate_output(result)

        return result

    @classmethod
    def from_function(
        cls,
        func: Callable,
        *,
        name: str | None = None,
        include_output_schema_in_description: bool = False,
    ) -> "Tool":
        """
        Create a Tool from a function using introspection.

        Uses Pydantic's TypeAdapter for robust schema generation, supporting:
        - All Python types (primitives, generics, unions, Literal, etc.)
        - Pydantic models and TypedDict as parameter types
        - Annotated[T, Field(description="...")] for parameter descriptions
        - Annotated[T, "description"] shorthand for descriptions
        - Complex nested types with proper $defs/$ref handling
        - Output schema extraction from return type annotation

        Args:
            func: The function to create a tool from.
            name: Optional name override for the tool. If not provided,
                uses the function's __name__.
            include_output_schema_in_description: If True, append the return type
                and any complex type definitions to the tool description. This can
                help the model understand what the tool returns. Default is False.

        Example:
            def search(
                query: Annotated[str, Field(description="Search query")],
                limit: int = 10,
                filters: dict[str, str] | None = None,
            ) -> list[dict]:
                '''Search the database.'''
                ...

            tool = Tool.from_function(search)
            # tool.output_schema contains schema for list[dict]
            # tool.call(query="test", validate_output=True) validates return value

            # With custom name:
            tool = Tool.from_function(search, name="search_database")
            # tool.name is "search_database"

            # With output schema in description:
            tool = Tool.from_function(search, include_output_schema_in_description=True)
            # Description becomes:
            # "Search the database.
            #
            # Returns: list[dict]"
        """
        # Get function name (use override if provided)
        tool_name = name if name is not None else func.__name__

        # Get docstring for description
        description = func.__doc__ or f"Call the {tool_name} function"
        description = description.strip()

        # Use TypeAdapter for robust schema generation
        type_adapter = _get_cached_typeadapter(func)
        schema = type_adapter.json_schema()

        # Clean up the schema (remove titles, additionalProperties: false)
        schema = _clean_schema(schema)

        # Extract parameters and required from the schema
        parameters = schema.get("properties", {})
        required = schema.get("required", [])
        definitions = schema.get("$defs")

        # Extract output schema from return type annotation
        output_schema = None
        output_type_adapter = None
        sig = inspect.signature(func)
        return_type = sig.return_annotation

        if return_type is not inspect.Parameter.empty:
            try:
                # Resolve string annotations if needed
                if isinstance(return_type, str):
                    hints = get_type_hints(func)
                    return_type = hints.get("return", return_type)

                # Create TypeAdapter for output validation
                output_type_adapter = TypeAdapter(return_type)
                output_schema = _clean_schema(output_type_adapter.json_schema())
            except Exception:
                # If we can't create a schema for the return type, that's fine
                # (e.g., for non-serializable types like custom classes)
                pass

        # Optionally append output schema info to description
        if (
            include_output_schema_in_description
            and return_type is not inspect.Parameter.empty
        ):
            output_info = _format_output_schema_for_description(
                return_type, output_schema
            )
            if output_info:
                description = f"{description}\n\n{output_info}"

        tool = cls(
            name=tool_name,
            description=description,
            parameters=parameters,
            required=required,
            definitions=definitions,
            output_schema=output_schema,
            run=func,
        )
        # Store the TypeAdapter for runtime validation (not serialized)
        tool._output_type_adapter = output_type_adapter
        return tool

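    # Illustrative sketch (not part of the package source): end-to-end use of
    # from_function() with output validation:
    #
    #     def add(a: int, b: int) -> int:
    #         """Add two integers."""
    #         return a + b
    #
    #     tool = Tool.from_function(add)
    #     tool.call(a=1, b=2, validate_output=True)  # -> 3
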
    @classmethod
    async def from_mcp_config(
        cls,
        config: dict[str, Any],
        *,
        timeout: float | None = None,
    ) -> list["Tool"]:
        """
        config: full Claude-Desktop-style dict *or* just its "mcpServers" block
        Returns a flat list of Tools from all servers (FastMCP prefixes each
        tool name with its server key, i.e. serverkey_toolname).
        """
        # allow caller to pass either the whole desktop file or just the sub-dict
        servers_block = config.get("mcpServers", config)

        # FastMCP understands the whole config dict directly
        client = Client({"mcpServers": servers_block}, timeout=timeout)
        async with client:
            all_tools = await _load_all_mcp_tools(client)

        return all_tools

    @classmethod
    async def from_mcp(
        cls,
        server_name: str,
        *,
        tool_name: str | None = None,
        timeout: float | None = None,
        **server_spec,  # url="…" OR command="…" args=[…]
    ) -> Any:  # Tool | list[Tool]
        """
        Thin wrapper for one server. Example uses:

        await Tool.from_mcp("weather", url="https://weather.example.com/mcp")
        await Tool.from_mcp("assistant", command="python", args=["./assistant.py"], tool_name="answer_question")
        """
        # ensure at least one of command or url is defined
        if not (server_spec.get("url") or server_spec.get("command")):
            raise ValueError("must provide url or command")
        # build a one-server desktop-style dict
        cfg = {server_name: server_spec}
        tools = await cls.from_mcp_config(cfg, timeout=timeout)
        if tool_name is None:
            return tools
        for t in tools:
            if t.name.endswith(tool_name):  # prefixed by FastMCP
                return t
        raise ValueError(f"Tool '{tool_name}' not found on that server")

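    # Illustrative sketch (not part of the package source; the "weather" server
    # key and URL are hypothetical):
    #
    #     cfg = {"mcpServers": {"weather": {"url": "https://weather.example.com/mcp"}}}
    #     tools = await Tool.from_mcp_config(cfg)
    #     # FastMCP prefixes each tool name with its server key, e.g. "weather_get_forecast"
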
    @staticmethod
    def _tool_from_meta(meta: dict[str, Any], runner) -> "Tool":
        props = meta["inputSchema"].get("properties", {})
        req = meta["inputSchema"].get("required", [])
        addl = meta["inputSchema"].get("additionalProperties")
        return Tool(
            name=meta["name"],
            description=meta.get("description", ""),
            parameters=props,
            required=req,
            additionalProperties=addl,
            run=runner,
        )

    def _is_strict_mode_compatible(self) -> bool:
        """
        Check if this tool's schema is compatible with OpenAI strict mode.
        Strict mode requires all objects to have defined properties.
        """

        def has_undefined_objects(schema: dict | list | Any) -> bool:
            """Recursively check for objects without defined properties."""
            if isinstance(schema, dict):
                # Check if this is an object type without properties
                if schema.get("type") == "object":
                    # If additionalProperties is True or properties is missing/empty
                    if schema.get("additionalProperties") is True:
                        return True
                    if "properties" not in schema or not schema["properties"]:
                        return True
                # Recursively check nested schemas
                for value in schema.values():
                    if has_undefined_objects(value):
                        return True
            elif isinstance(schema, list):
                for item in schema:
                    if has_undefined_objects(item):
                        return True
            return False

        return not has_undefined_objects(self.parameters or {})

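    # Illustrative sketch (not part of the package source): a bare "object"
    # schema with no "properties" is rejected, so the dumpers below silently
    # fall back to strict=False for tools like:
    #
    #     t = Tool(name="patch", parameters={"payload": {"type": "object"}})
    #     t._is_strict_mode_compatible()  # -> False
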
    def _json_schema(
        self, include_additional_properties=False, remove_defaults=False
    ) -> dict[str, Any]:
        def _add_additional_properties_recursive(
            schema: dict | list | Any, remove_defaults: bool = False
        ) -> dict | list | Any:
            """Recursively add additionalProperties: false to all object-type schemas.
            In strict mode (when remove_defaults=True), also makes all properties required."""
            if isinstance(schema, dict):
                # Copy the dictionary to avoid modifying the original
                new_schema = schema.copy()

                # make sure to label arrays and objects
                if "type" not in new_schema:
                    if "properties" in new_schema:
                        new_schema["type"] = "object"
                    elif "items" in new_schema:
                        new_schema["type"] = "array"

                # If this is an object type schema, set additionalProperties: false
                if new_schema.get("type") == "object":
                    new_schema["additionalProperties"] = False

                    # In strict mode, all properties must be required
                    if remove_defaults and "properties" in new_schema:
                        new_schema["required"] = list(new_schema["properties"].keys())

                # Remove default values if requested (for strict mode)
                if remove_defaults and "default" in new_schema:
                    del new_schema["default"]

                # Recursively process all values in the dictionary
                for key, value in new_schema.items():
                    new_schema[key] = _add_additional_properties_recursive(
                        value, remove_defaults
                    )

                return new_schema
            elif isinstance(schema, list):
                # Recursively process all items in the list
                return [
                    _add_additional_properties_recursive(item, remove_defaults)
                    for item in schema
                ]
            else:
                # Return primitive values as-is
                return schema

        # Start with the base schema structure
        if include_additional_properties and self.parameters:
            # Apply recursive additionalProperties processing to parameters
            processed_parameters = _add_additional_properties_recursive(
                self.parameters, remove_defaults
            )
        else:
            processed_parameters = self.parameters

        # Process definitions too
        if self.definitions and include_additional_properties:
            processed_definitions = _add_additional_properties_recursive(
                self.definitions, remove_defaults
            )
        else:
            processed_definitions = self.definitions

        res = {
            "type": "object",
            "properties": processed_parameters,
            "required": self.required,  # Use the tool's actual required list
        }

        if include_additional_properties:
            res["additionalProperties"] = False

        # Include definitions if present (for $ref support)
        if processed_definitions:
            res["$defs"] = processed_definitions

        return res

    # ---------- dumpers ----------
    def for_openai_completions(
        self, *, strict: bool = True, **kwargs
    ) -> dict[str, Any]:
        if self.is_built_in:
            return {"type": self.type, **self.built_in_args, **kwargs}

        # Check if schema is compatible with strict mode
        if strict and not self._is_strict_mode_compatible():
            strict = False

        if strict:
            # For strict mode, remove defaults and make all parameters required
            schema = self._json_schema(
                include_additional_properties=True, remove_defaults=True
            )
            schema["required"] = list(
                (self.parameters or {}).keys()
            )  # All parameters required in strict mode
        else:
            # For non-strict mode, use the original required list
            schema = self._json_schema(include_additional_properties=True)

        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": schema,
                "strict": strict,
            },
        }

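    # Illustrative sketch (not part of the package source): the completions-API
    # dump for a simple tool looks like:
    #
    #     Tool(name="echo", description="Echo a message.",
    #          parameters={"message": str}).for_openai_completions()
    #     # {"type": "function",
    #     #  "function": {"name": "echo", "description": "Echo a message.",
    #     #               "parameters": {"type": "object",
    #     #                              "properties": {"message": {"type": "string"}},
    #     #                              "required": ["message"],
    #     #                              "additionalProperties": False},
    #     #               "strict": True}}
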
    def for_openai(self, strict: bool = True, **kwargs):
        """Alias for for_openai_completions()."""
        return self.for_openai_completions(strict=strict, **kwargs)

    def for_openai_responses(self, *, strict: bool = True, **kwargs) -> dict[str, Any]:
        if self.is_built_in:
            return {"type": self.type, **self.built_in_args, **kwargs}

        # Check if schema is compatible with strict mode
        if strict and not self._is_strict_mode_compatible():
            strict = False

        if strict:
            # For strict mode, remove defaults and make all parameters required
            schema = self._json_schema(
                include_additional_properties=True, remove_defaults=True
            )
            schema["required"] = list(
                (self.parameters or {}).keys()
            )  # All parameters required in strict mode

            return {
                "type": "function",
                "name": self.name,
                "description": self.description,
                "parameters": schema,
                "strict": True,
            }
        else:
            # For non-strict mode, use the original required list
            return {
                "type": "function",
                "name": self.name,
                "description": self.description,
                "parameters": self._json_schema(include_additional_properties=True),
            }

    def for_anthropic(self, *, strict: bool = True, **kwargs) -> dict[str, Any]:
        # built-in tools have "name", "type", maybe metadata
        if self.is_built_in:
            return {
                "name": self.name,
                "type": self.type,
                **self.built_in_args,
                **kwargs,
            }

        # Check if schema is compatible with strict mode
        if strict and not self._is_strict_mode_compatible():
            strict = False

        if strict:
            # For strict mode, remove defaults and make all parameters required
            schema = self._json_schema(
                include_additional_properties=True, remove_defaults=True
            )
            schema["required"] = list(
                (self.parameters or {}).keys()
            )  # All parameters required in strict mode

            return {
                "name": self.name,
                "description": self.description,
                "input_schema": schema,
                "strict": True,
            }
        else:
            # For non-strict mode, use the original required list
            return {
                "name": self.name,
                "description": self.description,
                "input_schema": self._json_schema(),
            }

    def for_google(self) -> dict[str, Any]:
        """
        Shape used by google.genai docs.
        """
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self._json_schema(),
        }

    def for_mistral(self) -> dict[str, Any]:
        return self.for_openai_completions()

    def dump_for(
        self,
        provider: Literal[
            "openai-responses", "openai-completions", "anthropic", "google"
        ],
        **kw,
    ) -> dict[str, Any]:
        if provider == "openai-responses":
            return self.for_openai_responses(**kw)
        if provider == "openai-completions":
            return self.for_openai_completions(**kw)
        if provider == "anthropic":
            return self.for_anthropic(**kw)
        if provider == "google":
            return self.for_google()
        raise ValueError(provider)

    @classmethod
    def built_in(cls, name: str, **kwargs):
        if "type" in kwargs:
            type = kwargs.pop("type")
        else:
            type = name
        return cls(name=name, type=type, is_built_in=True, built_in_args=kwargs)


class OpenAIMCPSpec(TypedDict):
    type: str
    server_label: str
    server_url: str
    headers: dict | None
    require_approval: str


class MCPServer(BaseModel):
    """
    Allow MCPServers to be passed directly, if provider supports it.
    Provider can directly call MCP instead of handling it client-side.
    Should work with Anthropic MCP connector and OpenAI responses API.
    """

    name: str
    url: str
    # anthropic-specific
    token: str | None = None
    configuration: dict | None = None
    # openai-specific
    headers: dict | None = None

    # tools cache
    _tools: list[Tool] | None = None

    @classmethod
    def from_openai(cls, spec: OpenAIMCPSpec):
        return cls(
            name=spec["server_label"],
            url=spec["server_url"],
            headers=spec.get("headers"),
        )

    def for_openai_responses(self):
        res: dict[str, Any] = {
            "type": "mcp",
            "server_label": self.name,
            "server_url": self.url,
            "require_approval": "never",
        }
        if self.headers:
            res["headers"] = self.headers

        return res

    def for_anthropic(self):
        res: dict[str, Any] = {
            "type": "url",
            "url": self.url,
            "name": self.name,
        }
        if self.token:
            res["authorization_token"] = self.token
        if self.configuration:
            res["tool_configuration"] = self.configuration

        return res

    async def to_tools(self) -> list[Tool]:
        """
        Compatible with ALL providers.
        Caches so we don't have to hit the server a ton of times.
        """
        if self._tools:
            return self._tools
        else:
            tools: list[Tool] = await Tool.from_mcp(self.name, url=self.url)
            self._tools = tools
            return tools


# Note: prefab submodule is available via lm_deluge.tool.prefab
# but not auto-imported here to avoid circular imports
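
Taken together, the new lm_deluge/tool package replaces the old single-module lm_deluge/tool.py (+1118 lines added here versus the 849 removed above). A minimal usage sketch, assuming only the APIs shown in this diff (the get_weather function and its city argument are hypothetical):

from typing import Annotated
from lm_deluge.tool import Tool

def get_weather(city: Annotated[str, "City name"]) -> str:
    """Look up current weather for a city."""
    return f"Sunny in {city}"

tool = Tool.from_function(get_weather)
print(tool.dump_for("openai-completions"))  # completions-API shape
print(tool.dump_for("anthropic"))           # Anthropic tool shape
print(tool.call(city="Paris", validate_output=True))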