lm-deluge 0.0.67__py3-none-any.whl → 0.0.88__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lm-deluge might be problematic.

Files changed (92)
  1. lm_deluge/__init__.py +25 -2
  2. lm_deluge/api_requests/anthropic.py +92 -17
  3. lm_deluge/api_requests/base.py +47 -11
  4. lm_deluge/api_requests/bedrock.py +7 -4
  5. lm_deluge/api_requests/chat_reasoning.py +4 -0
  6. lm_deluge/api_requests/gemini.py +138 -18
  7. lm_deluge/api_requests/openai.py +114 -21
  8. lm_deluge/client.py +282 -49
  9. lm_deluge/config.py +15 -3
  10. lm_deluge/mock_openai.py +643 -0
  11. lm_deluge/models/__init__.py +12 -1
  12. lm_deluge/models/anthropic.py +17 -2
  13. lm_deluge/models/arcee.py +16 -0
  14. lm_deluge/models/deepseek.py +36 -4
  15. lm_deluge/models/google.py +29 -0
  16. lm_deluge/models/grok.py +24 -0
  17. lm_deluge/models/kimi.py +36 -0
  18. lm_deluge/models/minimax.py +10 -0
  19. lm_deluge/models/openai.py +100 -0
  20. lm_deluge/models/openrouter.py +86 -8
  21. lm_deluge/models/together.py +11 -0
  22. lm_deluge/models/zai.py +1 -0
  23. lm_deluge/pipelines/gepa/__init__.py +95 -0
  24. lm_deluge/pipelines/gepa/core.py +354 -0
  25. lm_deluge/pipelines/gepa/docs/samples.py +696 -0
  26. lm_deluge/pipelines/gepa/examples/01_synthetic_keywords.py +140 -0
  27. lm_deluge/pipelines/gepa/examples/02_gsm8k_math.py +261 -0
  28. lm_deluge/pipelines/gepa/examples/03_hotpotqa_multihop.py +300 -0
  29. lm_deluge/pipelines/gepa/examples/04_batch_classification.py +271 -0
  30. lm_deluge/pipelines/gepa/examples/simple_qa.py +129 -0
  31. lm_deluge/pipelines/gepa/optimizer.py +435 -0
  32. lm_deluge/pipelines/gepa/proposer.py +235 -0
  33. lm_deluge/pipelines/gepa/util.py +165 -0
  34. lm_deluge/{llm_tools → pipelines}/score.py +2 -2
  35. lm_deluge/{llm_tools → pipelines}/translate.py +5 -3
  36. lm_deluge/prompt.py +224 -40
  37. lm_deluge/request_context.py +7 -2
  38. lm_deluge/tool/__init__.py +1118 -0
  39. lm_deluge/tool/builtin/anthropic/__init__.py +300 -0
  40. lm_deluge/tool/builtin/gemini.py +59 -0
  41. lm_deluge/tool/builtin/openai.py +74 -0
  42. lm_deluge/tool/cua/__init__.py +173 -0
  43. lm_deluge/tool/cua/actions.py +148 -0
  44. lm_deluge/tool/cua/base.py +27 -0
  45. lm_deluge/tool/cua/batch.py +215 -0
  46. lm_deluge/tool/cua/converters.py +466 -0
  47. lm_deluge/tool/cua/kernel.py +702 -0
  48. lm_deluge/tool/cua/trycua.py +989 -0
  49. lm_deluge/tool/prefab/__init__.py +45 -0
  50. lm_deluge/tool/prefab/batch_tool.py +156 -0
  51. lm_deluge/tool/prefab/docs.py +1119 -0
  52. lm_deluge/tool/prefab/email.py +294 -0
  53. lm_deluge/tool/prefab/filesystem.py +1711 -0
  54. lm_deluge/tool/prefab/full_text_search/__init__.py +285 -0
  55. lm_deluge/tool/prefab/full_text_search/tantivy_index.py +396 -0
  56. lm_deluge/tool/prefab/memory.py +458 -0
  57. lm_deluge/tool/prefab/otc/__init__.py +165 -0
  58. lm_deluge/tool/prefab/otc/executor.py +281 -0
  59. lm_deluge/tool/prefab/otc/parse.py +188 -0
  60. lm_deluge/tool/prefab/random.py +212 -0
  61. lm_deluge/tool/prefab/rlm/__init__.py +296 -0
  62. lm_deluge/tool/prefab/rlm/executor.py +349 -0
  63. lm_deluge/tool/prefab/rlm/parse.py +144 -0
  64. lm_deluge/tool/prefab/sandbox.py +1621 -0
  65. lm_deluge/tool/prefab/sheets.py +385 -0
  66. lm_deluge/tool/prefab/subagents.py +233 -0
  67. lm_deluge/tool/prefab/todos.py +342 -0
  68. lm_deluge/tool/prefab/tool_search.py +169 -0
  69. lm_deluge/tool/prefab/web_search.py +199 -0
  70. lm_deluge/tracker.py +16 -13
  71. lm_deluge/util/schema.py +412 -0
  72. lm_deluge/warnings.py +8 -0
  73. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/METADATA +22 -9
  74. lm_deluge-0.0.88.dist-info/RECORD +117 -0
  75. lm_deluge/built_in_tools/anthropic/__init__.py +0 -128
  76. lm_deluge/built_in_tools/openai.py +0 -28
  77. lm_deluge/presets/cerebras.py +0 -17
  78. lm_deluge/presets/meta.py +0 -13
  79. lm_deluge/tool.py +0 -849
  80. lm_deluge-0.0.67.dist-info/RECORD +0 -72
  81. lm_deluge/{llm_tools → pipelines}/__init__.py +1 -1
  82. /lm_deluge/{llm_tools → pipelines}/classify.py +0 -0
  83. /lm_deluge/{llm_tools → pipelines}/extract.py +0 -0
  84. /lm_deluge/{llm_tools → pipelines}/locate.py +0 -0
  85. /lm_deluge/{llm_tools → pipelines}/ocr.py +0 -0
  86. /lm_deluge/{built_in_tools → tool/builtin}/anthropic/bash.py +0 -0
  87. /lm_deluge/{built_in_tools → tool/builtin}/anthropic/computer_use.py +0 -0
  88. /lm_deluge/{built_in_tools → tool/builtin}/anthropic/editor.py +0 -0
  89. /lm_deluge/{built_in_tools → tool/builtin}/base.py +0 -0
  90. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/WHEEL +0 -0
  91. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/licenses/LICENSE +0 -0
  92. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.88.dist-info}/top_level.txt +0 -0
lm_deluge/tool.py DELETED
@@ -1,849 +0,0 @@
-import asyncio
-import inspect
-from concurrent.futures import ThreadPoolExecutor
-from typing import (
-    Any,
-    Callable,
-    Coroutine,
-    Literal,
-    Type,
-    TypedDict,
-    get_args,
-    get_origin,
-    get_type_hints,
-)
-
-from fastmcp import Client  # pip install fastmcp >= 2.0
-from mcp.types import Tool as MCPTool
-from pydantic import BaseModel, Field, field_validator
-
-from lm_deluge.image import Image
-from lm_deluge.prompt import Text, ToolResultPart
-
-
-def _python_type_to_json_schema_enhanced(python_type: Any) -> dict[str, Any]:
-    """
-    Convert Python type annotations to JSON Schema.
-    Handles: primitives, Optional, Literal, list[T], dict[str, T], Union.
-    """
-    # Get origin and args for generic types
-    origin = get_origin(python_type)
-    args = get_args(python_type)
-
-    # Handle Optional[T] or T | None
-    if origin is type(None) or python_type is type(None):
-        return {"type": "null"}
-
-    # Handle Union types (including Optional)
-    if origin is Literal:
-        # Literal["a", "b"] -> enum
-        return {"type": "string", "enum": list(args)}
-
-    # Handle list[T]
-    if origin is list:
-        if args:
-            items_schema = _python_type_to_json_schema_enhanced(args[0])
-            return {"type": "array", "items": items_schema}
-        return {"type": "array"}
-
-    # Handle dict[str, T]
-    if origin is dict:
-        if len(args) >= 2:
-            # For dict[str, T], we can set additionalProperties
-            value_schema = _python_type_to_json_schema_enhanced(args[1])
-            return {"type": "object", "additionalProperties": value_schema}
-        return {"type": "object"}
-
-    # Handle basic types
-    if python_type is int:
-        return {"type": "integer"}
-    elif python_type is float:
-        return {"type": "number"}
-    elif python_type is str:
-        return {"type": "string"}
-    elif python_type is bool:
-        return {"type": "boolean"}
-    elif python_type is list:
-        return {"type": "array"}
-    elif python_type is dict:
-        return {"type": "object"}
-    else:
-        # Default to string for unknown types
-        return {"type": "string"}
-
-
-class ToolParams:
-    """
-    Helper class for constructing tool parameters more easily.
-
-    Usage:
-        # Simple constructor with Python types
-        params = ToolParams({"city": str, "age": int})
-
-        # With extras (description, enum, etc)
-        params = ToolParams({
-            "operation": (str, {"enum": ["add", "sub"], "description": "Math operation"}),
-            "value": (int, {"description": "The value"})
-        })
-
-        # From Pydantic model
-        params = ToolParams.from_pydantic(MyModel)
-
-        # From TypedDict
-        params = ToolParams.from_typed_dict(MyTypedDict)
-
-        # From existing JSON Schema
-        params = ToolParams.from_json_schema(schema_dict, required=["field1"])
-    """
-
-    def __init__(self, spec: dict[str, Any]):
-        """
-        Create ToolParams from a dict mapping parameter names to types or (type, extras) tuples.
-
-        Args:
-            spec: Dict where values can be:
-                - A Python type (str, int, list[str], etc.)
-                - A tuple of (type, extras_dict) for additional JSON Schema properties
-                - An already-formed JSON Schema dict (passed through as-is)
-        """
-        self.parameters: dict[str, Any] = {}
-        self.required: list[str] = []
-
-        for param_name, param_spec in spec.items():
-            # If it's a tuple, extract (type, extras)
-            if isinstance(param_spec, tuple):
-                param_type, extras = param_spec
-                schema = _python_type_to_json_schema_enhanced(param_type)
-                schema.update(extras)
-                self.parameters[param_name] = schema
-                # Mark as required unless explicitly marked as optional
-                if extras.get("optional") is not True:
-                    self.required.append(param_name)
-            # If it's already a dict with "type" key, use as-is
-            elif isinstance(param_spec, dict) and "type" in param_spec:
-                self.parameters[param_name] = param_spec
-                # Assume required unless marked optional
-                if param_spec.get("optional") is not True:
-                    self.required.append(param_name)
-            # Otherwise treat as a Python type
-            else:
-                self.parameters[param_name] = _python_type_to_json_schema_enhanced(
-                    param_spec
-                )
-                self.required.append(param_name)
-
-    @classmethod
-    def from_pydantic(cls, model: Type[BaseModel]) -> "ToolParams":
-        """
-        Create ToolParams from a Pydantic model.
-
-        Args:
-            model: A Pydantic BaseModel class
-        """
-        # Get the JSON schema from Pydantic
-        schema = model.model_json_schema()
-        properties = schema.get("properties", {})
-        required = schema.get("required", [])
-
-        return cls.from_json_schema(properties, required)
-
-    @classmethod
-    def from_typed_dict(cls, typed_dict: Type) -> "ToolParams":
-        """
-        Create ToolParams from a TypedDict.
-
-        Args:
-            typed_dict: A TypedDict class
-        """
-        hints = get_type_hints(typed_dict)
-
-        # TypedDict doesn't have a built-in way to mark optional fields,
-        # but we can check for Optional in the type hints
-        params = {}
-        required = []
-
-        for field_name, field_type in hints.items():
-            # Check if it's Optional (Union with None)
-            origin = get_origin(field_type)
-            # args = get_args(field_type)
-
-            is_optional = False
-            actual_type = field_type
-
-            # Check for Union types (including Optional[T] which is Union[T, None])
-            if origin is type(None):
-                is_optional = True
-                actual_type = type(None)
-
-            # For now, treat all TypedDict fields as required unless they're explicitly Optional
-            schema = _python_type_to_json_schema_enhanced(actual_type)
-            params[field_name] = schema
-
-            if not is_optional:
-                required.append(field_name)
-
-        instance = cls.__new__(cls)
-        instance.parameters = params
-        instance.required = required
-        return instance
-
-    @classmethod
-    def from_json_schema(
-        cls, properties: dict[str, Any], required: list[str] | None = None
-    ) -> "ToolParams":
-        """
-        Create ToolParams from an existing JSON Schema properties dict.
-
-        Args:
-            properties: The "properties" section of a JSON Schema
-            required: List of required field names
-        """
-        instance = cls.__new__(cls)
-        instance.parameters = properties
-        instance.required = required or []
-        return instance
-
-    def to_dict(self) -> dict[str, Any]:
-        """
-        Convert to a dict with 'parameters' and 'required' keys.
-        Useful for unpacking into Tool constructor.
-        """
-        return {"parameters": self.parameters, "required": self.required}
-
-
-async def _load_all_mcp_tools(client: Client) -> list["Tool"]:
-    metas: list[MCPTool] = await client.list_tools()
-
-    def make_runner(name: str):
-        async def _async_call(**kw):
-            async with client:
-                # maybe should be call_tool_mcp if don't want to raise error
-                raw_result = await client.call_tool(name, kw)
-
-            # for now just concatenate them all into a result string
-            results = []
-            if not isinstance(raw_result, list):  # newer versions of fastmcp
-                content_blocks = raw_result.content
-            else:
-                content_blocks = raw_result
-            for block in content_blocks:
-                if block.type == "text":
-                    results.append(Text(block.text))
-                elif block.type == "image":
-                    data_url = f"data:{block.mimeType};base64,{block.data}"
-                    results.append(Image(data=data_url))
-
-            return results
-
-        return _async_call
-
-    tools: list[Tool] = []
-    for m in metas:
-        # Extract definitions from the schema (could be $defs or definitions)
-        definitions = m.inputSchema.get("$defs") or m.inputSchema.get("definitions")
-
-        tools.append(
-            Tool(
-                name=m.name,
-                description=m.description,
-                parameters=m.inputSchema.get("properties", {}),
-                required=m.inputSchema.get("required", []),
-                additionalProperties=m.inputSchema.get("additionalProperties"),
-                definitions=definitions,
-                run=make_runner(m.name),
-            )
-        )
-    return tools
-
-
-class Tool(BaseModel):
-    """
-    Provider‑agnostic tool definition with no extra nesting.
-    """
-
-    name: str
-    description: str | None = None
-    parameters: dict[str, Any] | None = None
-    required: list[str] = Field(default_factory=list)
-    additionalProperties: bool | None = None  # only
-    # if desired, can provide a callable to run the tool
-    run: Callable | None = None
-    # for built-in tools that don't require schema
-    is_built_in: bool = False
-    type: str | None = None
-    built_in_args: dict[str, Any] = Field(default_factory=dict)
-    # JSON Schema definitions (for $ref support)
-    definitions: dict[str, Any] | None = None
-
-    @field_validator("name")
-    @classmethod
-    def validate_name(cls, v: str) -> str:
-        if v.startswith("_computer_"):
-            raise ValueError(
-                f"Tool name '{v}' uses reserved prefix '_computer_'. "
-                "This prefix is reserved for computer use actions."
-            )
-        return v
-
-    @field_validator("parameters", mode="before")
-    @classmethod
-    def validate_parameters(cls, v: Any) -> dict[str, Any] | None:
-        """Accept ToolParams objects and convert to dict for backwards compatibility."""
-        if isinstance(v, ToolParams):
-            return v.parameters
-        return v
-
-    def model_post_init(self, __context: Any) -> None:
-        """
-        After validation, if parameters came from ToolParams, also update required list.
-        This is called by Pydantic after __init__.
-        """
-        # This is a bit tricky - we need to capture the required list from ToolParams
-        # Since Pydantic has already converted it in the validator, we can't access it here
-        # Instead, we'll handle this differently in the convenience constructors
-        pass
-
-    def _is_async(self) -> bool:
-        return inspect.iscoroutinefunction(self.run)
-
-    def call(self, **kwargs) -> str | list[ToolResultPart]:
-        if self.run is None:
-            raise ValueError("No run function provided")
-
-        if self._is_async():
-            coro: Coroutine = self.run(**kwargs)  # type: ignore[arg-type]
-            try:
-                loop = asyncio.get_running_loop()
-                assert loop
-            except RuntimeError:
-                # no loop → safe to block
-                return asyncio.run(coro)
-            else:
-                # Loop is running → execute coroutine in a worker thread
-                def _runner():
-                    return asyncio.run(coro)
-
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    return executor.submit(_runner).result()
-        else:
-            # plain function
-            return self.run(**kwargs)
-
-    async def acall(self, **kwargs) -> str | list[ToolResultPart]:
-        if self.run is None:
-            raise ValueError("No run function provided")
-
-        if self._is_async():
-            return await self.run(**kwargs)  # type: ignore[func-returns-value]
-        else:
-            loop = asyncio.get_running_loop()
-            assert self.run is not None, "can't run None"
-            return await loop.run_in_executor(None, lambda: self.run(**kwargs))  # type: ignore
-
-    @classmethod
-    def from_function(cls, func: Callable) -> "Tool":
-        """Create a Tool from a function using introspection."""
-        # Get function name
-        name = func.__name__
-
-        # Get docstring for description
-        description = func.__doc__ or f"Call the {name} function"
-        description = description.strip()
-
-        # Get function signature and type hints
-        sig = inspect.signature(func)
-        type_hints = get_type_hints(func)
-
-        # Build parameters and required list
-        parameters = {}
-        required = []
-
-        for param_name, param in sig.parameters.items():
-            # Skip *args and **kwargs
-            if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
-                continue
-
-            # Get type hint
-            param_type = type_hints.get(param_name, str)
-
-            # Convert Python types to JSON Schema types
-            json_type = _python_type_to_json_schema_enhanced(param_type)
-
-            parameters[param_name] = json_type
-
-            # Add to required if no default value
-            if param.default is param.empty:
-                required.append(param_name)
-
-        return cls(
-            name=name,
-            description=description,
-            parameters=parameters,
-            required=required,
-            run=func,
-        )
-
-    @classmethod
-    async def from_mcp_config(
-        cls,
-        config: dict[str, Any],
-        *,
-        timeout: float | None = None,
-    ) -> list["Tool"]:
-        """
-        config: full Claude-Desktop-style dict *or* just its "mcpServers" block
-        Returns {server_key: [Tool, …], …}
-        """
-        # allow caller to pass either the whole desktop file or just the sub-dict
-        servers_block = config.get("mcpServers", config)
-
-        # FastMCP understands the whole config dict directly
-        client = Client({"mcpServers": servers_block}, timeout=timeout)
-        async with client:
-            all_tools = await _load_all_mcp_tools(client)
-
-        # bucket by prefix that FastMCP added (serverkey_toolname)
-        return all_tools
-
-    @classmethod
-    async def from_mcp(
-        cls,
-        server_name: str,
-        *,
-        tool_name: str | None = None,
-        timeout: float | None = None,
-        **server_spec,  # url="…" OR command="…" args=[…]
-    ) -> Any:  # Tool | list[Tool]
-        """
-        Thin wrapper for one server. Example uses:
-
-            Tool.from_mcp(url="https://weather.example.com/mcp")
-            Tool.from_mcp(command="python", args=["./assistant.py"], tool_name="answer_question")
-        """
-        # ensure at least one of command or url is defined
-        if not (server_spec.get("url") or server_spec.get("command")):
-            raise ValueError("most provide url or command")
-        # build a one-server desktop-style dict
-        cfg = {server_name: server_spec}
-        tools = await cls.from_mcp_config(cfg, timeout=timeout)
-        if tool_name is None:
-            return tools
-        for t in tools:
-            if t.name.endswith(f"{tool_name}"):  # prefixed by FastMCP
-                return t
-        raise ValueError(f"Tool '{tool_name}' not found on that server")
-
-    @classmethod
-    def from_params(
-        cls,
-        name: str,
-        params: ToolParams,
-        *,
-        description: str | None = None,
-        run: Callable | None = None,
-        **kwargs,
-    ) -> "Tool":
-        """
-        Create a Tool from a ToolParams object.
-
-        Args:
-            name: Tool name
-            params: ToolParams object defining the parameter schema
-            description: Optional description
-            run: Optional callable to execute the tool
-            **kwargs: Additional Tool arguments
-
-        Example:
-            params = ToolParams({"city": str, "age": int})
-            tool = Tool.from_params("get_user", params, run=my_function)
-        """
-        return cls(
-            name=name,
-            description=description,
-            parameters=params.parameters,
-            required=params.required,
-            run=run,
-            **kwargs,
-        )
-
-    @classmethod
-    def from_pydantic(
-        cls,
-        name: str,
-        model: Type[BaseModel],
-        *,
-        description: str | None = None,
-        run: Callable | None = None,
-        **kwargs,
-    ) -> "Tool":
-        """
-        Create a Tool from a Pydantic model.
-
-        Args:
-            name: Tool name
-            model: Pydantic BaseModel class
-            description: Optional description (defaults to model docstring)
-            run: Optional callable to execute the tool
-            **kwargs: Additional Tool arguments
-
-        Example:
-            class UserQuery(BaseModel):
-                city: str
-                age: int
-
-            tool = Tool.from_pydantic("get_user", UserQuery, run=my_function)
-        """
-        params = ToolParams.from_pydantic(model)
-
-        # Use model docstring as default description if not provided
-        if description is None and model.__doc__:
-            description = model.__doc__.strip()
-
-        return cls(
-            name=name,
-            description=description,
-            parameters=params.parameters,
-            required=params.required,
-            run=run,
-            **kwargs,
-        )
-
-    @classmethod
-    def from_typed_dict(
-        cls,
-        name: str,
-        typed_dict: Type,
-        *,
-        description: str | None = None,
-        run: Callable | None = None,
-        **kwargs,
-    ) -> "Tool":
-        """
-        Create a Tool from a TypedDict.
-
-        Args:
-            name: Tool name
-            typed_dict: TypedDict class
-            description: Optional description
-            run: Optional callable to execute the tool
-            **kwargs: Additional Tool arguments
-
-        Example:
-            class UserQuery(TypedDict):
-                city: str
-                age: int
-
-            tool = Tool.from_typed_dict("get_user", UserQuery, run=my_function)
-        """
-        params = ToolParams.from_typed_dict(typed_dict)
-
-        return cls(
-            name=name,
-            description=description,
-            parameters=params.parameters,
-            required=params.required,
-            run=run,
-            **kwargs,
-        )
-
-    @staticmethod
-    def _tool_from_meta(meta: dict[str, Any], runner) -> "Tool":
-        props = meta["inputSchema"].get("properties", {})
-        req = meta["inputSchema"].get("required", [])
-        addl = meta["inputSchema"].get("additionalProperties")
-        return Tool(
-            name=meta["name"],
-            description=meta.get("description", ""),
-            parameters=props,
-            required=req,
-            additionalProperties=addl,
-            run=runner,
-        )
-
-    @staticmethod
-    def _python_type_to_json_schema(python_type) -> dict[str, Any]:
-        """
-        Convert Python type to JSON Schema type definition.
-        Now delegates to enhanced version for better type support.
-        """
-        return _python_type_to_json_schema_enhanced(python_type)
-
-    def _is_strict_mode_compatible(self) -> bool:
-        """
-        Check if this tool's schema is compatible with OpenAI strict mode.
-        Strict mode requires all objects to have defined properties.
-        """
-
-        def has_undefined_objects(schema: dict | list | Any) -> bool:
-            """Recursively check for objects without defined properties."""
-            if isinstance(schema, dict):
-                # Check if this is an object type without properties
-                if schema.get("type") == "object":
-                    # If additionalProperties is True or properties is missing/empty
-                    if schema.get("additionalProperties") is True:
-                        return True
-                    if "properties" not in schema or not schema["properties"]:
-                        return True
-                # Recursively check nested schemas
-                for value in schema.values():
-                    if has_undefined_objects(value):
-                        return True
-            elif isinstance(schema, list):
-                for item in schema:
-                    if has_undefined_objects(item):
-                        return True
-            return False
-
-        return not has_undefined_objects(self.parameters or {})
-
-    def _json_schema(
-        self, include_additional_properties=False, remove_defaults=False
-    ) -> dict[str, Any]:
-        def _add_additional_properties_recursive(
-            schema: dict | list | Any, remove_defaults: bool = False
-        ) -> dict | list | Any:
-            """Recursively add additionalProperties: false to all object-type schemas.
-            In strict mode (when remove_defaults=True), also makes all properties required."""
-            if isinstance(schema, dict):
-                # Copy the dictionary to avoid modifying the original
-                new_schema = schema.copy()
-
-                # make sure to label arrays and objects
-                if "type" not in new_schema:
-                    if "properties" in new_schema:
-                        new_schema["type"] = "object"
-                    elif "items" in new_schema:
-                        new_schema["type"] = "array"
-
-                # If this is an object type schema, set additionalProperties: false
-                if new_schema.get("type") == "object":
-                    new_schema["additionalProperties"] = False
-
-                    # In strict mode, all properties must be required
-                    if remove_defaults and "properties" in new_schema:
-                        new_schema["required"] = list(new_schema["properties"].keys())
-
-                # Remove default values if requested (for strict mode)
-                if remove_defaults and "default" in new_schema:
-                    del new_schema["default"]
-
-                # Recursively process all values in the dictionary
-                for key, value in new_schema.items():
-                    new_schema[key] = _add_additional_properties_recursive(
-                        value, remove_defaults
-                    )
-
-                return new_schema
-            elif isinstance(schema, list):
-                # Recursively process all items in the list
-                return [
-                    _add_additional_properties_recursive(item, remove_defaults)
-                    for item in schema
-                ]
-            else:
-                # Return primitive values as-is
-                return schema
-
-        # Start with the base schema structure
-        if include_additional_properties and self.parameters:
-            # Apply recursive additionalProperties processing to parameters
-            processed_parameters = _add_additional_properties_recursive(
-                self.parameters, remove_defaults
-            )
-        else:
-            processed_parameters = self.parameters
-
-        # Process definitions too
-        if self.definitions and include_additional_properties:
-            processed_definitions = _add_additional_properties_recursive(
-                self.definitions, remove_defaults
-            )
-        else:
-            processed_definitions = self.definitions
-
-        res = {
-            "type": "object",
-            "properties": processed_parameters,
-            "required": self.required,  # Use the tool's actual required list
-        }
-
-        if include_additional_properties:
-            res["additionalProperties"] = False
-
-        # Include definitions if present (for $ref support)
-        if processed_definitions:
-            res["$defs"] = processed_definitions
-
-        return res
-
-    # ---------- dumpers ----------
-    def for_openai_completions(
-        self, *, strict: bool = True, **kwargs
-    ) -> dict[str, Any]:
-        if self.is_built_in:
-            return {"type": self.type, **self.built_in_args, **kwargs}
-
-        # Check if schema is compatible with strict mode
-        if strict and not self._is_strict_mode_compatible():
-            strict = False
-
-        if strict:
-            # For strict mode, remove defaults and make all parameters required
-            schema = self._json_schema(
-                include_additional_properties=True, remove_defaults=True
-            )
-            schema["required"] = list(
-                (self.parameters or {}).keys()
-            )  # All parameters required in strict mode
-        else:
-            # For non-strict mode, use the original required list
-            schema = self._json_schema(include_additional_properties=True)
-
-        return {
-            "type": "function",
-            "function": {
-                "name": self.name,
-                "description": self.description,
-                "parameters": schema,
-                "strict": strict,
-            },
-        }
-
-    def for_openai(self, strict: bool = True, **kwargs):
-        """just an alias for the above"""
-        return self.for_openai_completions(strict=strict, **kwargs)
-
-    def for_openai_responses(self, **kwargs) -> dict[str, Any]:
-        if self.is_built_in:
-            return {"type": self.type, **self.built_in_args, **kwargs}
-        return {
-            "type": "function",
-            "name": self.name,
-            "description": self.description,
-            "parameters": self._json_schema(include_additional_properties=True),
-        }
-
-    def for_anthropic(self, **kwargs) -> dict[str, Any]:
-        # built-in tools have "name", "type", maybe metadata
-        if self.is_built_in:
-            return {
-                "name": self.name,
-                "type": self.type,
-                **self.built_in_args,
-                **kwargs,
-            }
-        return {
-            "name": self.name,
-            "description": self.description,
-            "input_schema": self._json_schema(),
-        }
-
-    def for_google(self) -> dict[str, Any]:
-        """
-        Shape used by google.genai docs.
-        """
-        return {
-            "name": self.name,
-            "description": self.description,
-            "parameters": self._json_schema(),
-        }
-
-    def for_mistral(self) -> dict[str, Any]:
-        return self.for_openai_completions()
-
-    def dump_for(
-        self,
-        provider: Literal[
-            "openai-responses", "openai-completions", "anthropic", "google"
-        ],
-        **kw,
-    ) -> dict[str, Any]:
-        if provider == "openai-responses":
-            return self.for_openai_responses()
-        if provider == "openai-completions":
-            return self.for_openai_completions(**kw)
-        if provider == "anthropic":
-            return self.for_anthropic()
-        if provider == "google":
-            return self.for_google()
-        raise ValueError(provider)
-
-    @classmethod
-    def built_in(cls, name: str, **kwargs):
-        if "type" in kwargs:
-            type = kwargs.pop("type")
-        else:
-            type = name
-        return cls(name=name, type=type, is_built_in=True, built_in_args=kwargs)
-
-
-class OpenAIMCPSpec(TypedDict):
-    type: str
-    server_label: str
-    server_url: str
-    headers: dict | None
-    require_approval: str
-
-
-class MCPServer(BaseModel):
-    """
-    Allow MCPServers to be passed directly, if provider supports it.
-    Provider can directly call MCP instead of handling it client-side.
-    Should work with Anthropic MCP connector and OpenAI responses API.
-    """
-
-    name: str
-    url: str
-    # anthropic-specific
-    token: str | None = None
-    configuration: dict | None = None
-    # openai-specific
-    headers: dict | None = None
-
-    # tools cache
-    _tools: list[Tool] | None = None
-
-    @classmethod
-    def from_openai(cls, spec: OpenAIMCPSpec):
-        return cls(
-            name=spec["server_label"],
-            url=spec["server_url"],
-            headers=spec.get("headers"),
-        )
-
-    def for_openai_responses(self):
-        res: dict[str, Any] = {
-            "type": "mcp",
-            "server_label": self.name,
-            "server_url": self.url,
-            "require_approval": "never",
-        }
-        if self.headers:
-            res["headers"] = self.headers
-
-        return res
-
-    def for_anthropic(self):
-        res: dict[str, Any] = {
-            "type": "url",
-            "url": self.url,
-            "name": self.name,
-        }
-        if self.token:
-            res["authorization_token"] = self.token
-        if self.configuration:
-            res["tool_configuration"] = self.configuration

-        return res
-
-    async def to_tools(self) -> list[Tool]:
-        """
-        Compatible with ALL providers.
-        Caches so we don't have to hit the server a ton of times.
-        """
-        if self._tools:
-            return self._tools
-        else:
-            tools: list[Tool] = await Tool.from_mcp(self.name, url=self.url)
-            self._tools = tools
-            return tools
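
For orientation, here is a minimal usage sketch of the Tool API defined in the deleted module, assembled from its own docstrings and method signatures shown above. The get_weather function is illustrative only, and it is an assumption (not shown in this diff) that the new lm_deluge/tool package added in 0.0.88 keeps a compatible surface.

# Sketch against the 0.0.67 lm_deluge/tool.py shown above; get_weather is a made-up example.
from lm_deluge.tool import Tool, ToolParams

def get_weather(city: str, units: str = "metric") -> str:
    """Look up the weather for a city."""
    return f"It is sunny in {city} ({units})."

# from_function introspects the signature: 'city' is required, 'units' has a default.
weather_tool = Tool.from_function(get_weather)

# Or build the schema explicitly and attach a runner; "optional": True keeps 'units' out of required.
params = ToolParams({
    "city": str,
    "units": (str, {"enum": ["metric", "imperial"], "optional": True}),
})
explicit_tool = Tool.from_params("get_weather", params, description="Weather lookup", run=get_weather)

# Provider-specific payloads and direct execution, per the dumpers defined above.
openai_spec = weather_tool.for_openai_completions(strict=True)
anthropic_spec = weather_tool.for_anthropic()
print(weather_tool.call(city="Paris"))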