lm-deluge 0.0.74__tar.gz → 0.0.75__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lm_deluge-0.0.74/src/lm_deluge.egg-info → lm_deluge-0.0.75}/PKG-INFO +1 -1
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/pyproject.toml +1 -1
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/anthropic.py +10 -1
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/bedrock.py +1 -3
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/openai.py +16 -2
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/client.py +6 -6
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/request_context.py +5 -2
- lm_deluge-0.0.75/src/lm_deluge/util/schema.py +412 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75/src/lm_deluge.egg-info}/PKG-INFO +1 -1
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge.egg-info/SOURCES.txt +1 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/LICENSE +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/README.md +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/setup.cfg +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/mock_openai.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/kimi.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/minimax.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/openai.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/presets/cerebras.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/presets/meta.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge/warnings.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/tests/test_file_upload.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/tests/test_mock_openai.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/tests/test_native_mcp_server.py +0 -0
- {lm_deluge-0.0.74 → lm_deluge-0.0.75}/tests/test_openrouter_generic.py +0 -0
src/lm_deluge/api_requests/anthropic.py (+10 -1)

```diff
@@ -12,6 +12,10 @@ from lm_deluge.prompt import (
 from lm_deluge.request_context import RequestContext
 from lm_deluge.tool import MCPServer, Tool
 from lm_deluge.usage import Usage
+from lm_deluge.util.schema import (
+    prepare_output_schema,
+    transform_schema_for_anthropic,
+)
 
 from ..models import APIModel
 from .base import APIRequestBase, APIResponse
@@ -87,10 +91,15 @@ def _build_anthropic_request(
     # Handle structured outputs (output_format)
     if context.output_schema:
         if model.supports_json:
+            base_schema = prepare_output_schema(context.output_schema)
+
+            # Apply Anthropic-specific transformations (move unsupported constraints to description)
+            transformed_schema = transform_schema_for_anthropic(base_schema)
+
             _add_beta(base_headers, "structured-outputs-2025-11-13")
             request_json["output_format"] = {
                 "type": "json_schema",
-                "schema":
+                "schema": transformed_schema,
             }
         else:
             print(
```
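For reference, `transform_schema_for_anthropic` (introduced in the new `util/schema.py` below) folds grammar-unenforceable constraints into the field description rather than dropping them silently. A minimal sketch of the effect, using a made-up schema:

```python
from lm_deluge.util.schema import transform_schema_for_anthropic

# Illustrative input: string length bounds that Anthropic's structured-output
# grammar cannot enforce directly.
schema = {
    "type": "object",
    "properties": {
        "sku": {"type": "string", "minLength": 3, "maxLength": 8},
    },
}

out = transform_schema_for_anthropic(schema)
# The constraints are popped off the property and folded into its description:
#   out["properties"]["sku"] == {
#       "type": "string",
#       "description": "{minLength: 3, maxLength: 8}",
#   }
# `schema` itself is untouched because the transform deep-copies first.
```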
src/lm_deluge/api_requests/bedrock.py (+1 -3)

```diff
@@ -197,9 +197,7 @@ async def _build_openai_bedrock_request(
     request_tools = []
     for tool in tools:
         if isinstance(tool, Tool):
-            request_tools.append(
-                tool.dump_for("openai-completions", strict=False)
-            )
+            request_tools.append(tool.dump_for("openai-completions", strict=False))
         elif isinstance(tool, MCPServer):
             as_tools = await tool.to_tools()
             request_tools.extend(
```
src/lm_deluge/api_requests/openai.py (+16 -2)

```diff
@@ -9,6 +9,10 @@ from aiohttp import ClientResponse
 from lm_deluge.request_context import RequestContext
 from lm_deluge.tool import MCPServer, Tool
 from lm_deluge.warnings import maybe_warn
+from lm_deluge.util.schema import (
+    prepare_output_schema,
+    transform_schema_for_openai,
+)
 
 from ..config import SamplingParams
 from ..models import APIModel
@@ -87,11 +91,16 @@ async def _build_oa_chat_request(
     # Handle structured outputs (output_schema takes precedence over json_mode)
     if context.output_schema:
         if model.supports_json:
+            base_schema = prepare_output_schema(context.output_schema)
+
+            # Apply OpenAI-specific transformations (currently passthrough with copy)
+            transformed_schema = transform_schema_for_openai(base_schema)
+
             request_json["response_format"] = {
                 "type": "json_schema",
                 "json_schema": {
                     "name": "response",
-                    "schema":
+                    "schema": transformed_schema,
                     "strict": True,
                 },
             }
@@ -326,11 +335,16 @@ async def _build_oa_responses_request(
     # Handle structured outputs (output_schema takes precedence over json_mode)
     if context.output_schema:
         if model.supports_json:
+            base_schema = prepare_output_schema(context.output_schema)
+
+            # Apply OpenAI-specific transformations (currently passthrough with copy)
+            transformed_schema = transform_schema_for_openai(base_schema)
+
             request_json["text"] = {
                 "format": {
                     "type": "json_schema",
                     "name": "response",
-                    "schema":
+                    "schema": transformed_schema,
                     "strict": True,
                 }
             }
```
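The two OpenAI code paths differ only in where the schema block lands: Chat Completions nests it under `response_format.json_schema`, while the Responses API uses the flatter `text.format` object. A hedged sketch of the Chat Completions payload for a trivial one-field schema (the `ok` field is invented for illustration):

```python
# What _build_oa_chat_request assembles after prepare_output_schema has
# applied strict-mode normalization to {"type": "object", "properties": {...}}:
request_fragment = {
    "response_format": {
        "type": "json_schema",
        "json_schema": {
            "name": "response",
            "schema": {
                "type": "object",
                "properties": {"ok": {"type": "boolean"}},
                "additionalProperties": False,  # added by _ensure_strict_json_schema
                "required": ["ok"],             # every property is made required
            },
            "strict": True,
        },
    }
}
```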
src/lm_deluge/client.py (+6 -6)

```diff
@@ -561,7 +561,7 @@ class _LLMClient(BaseModel):
         return_completions_only: Literal[True],
         show_progress: bool = ...,
         tools: list[Tool | dict | MCPServer] | None = ...,
-        output_schema: dict | None = ...,
+        output_schema: type[BaseModel] | dict | None = ...,
         cache: CachePattern | None = ...,
         service_tier: Literal["auto", "default", "flex", "priority"] | None = ...,
     ) -> list[str | None]: ...
@@ -574,7 +574,7 @@ class _LLMClient(BaseModel):
         return_completions_only: Literal[False] = ...,
         show_progress: bool = ...,
         tools: list[Tool | dict | MCPServer] | None = ...,
-        output_schema: dict | None = ...,
+        output_schema: type[BaseModel] | dict | None = ...,
         cache: CachePattern | None = ...,
         service_tier: Literal["auto", "default", "flex", "priority"] | None = ...,
     ) -> list[APIResponse]: ...
@@ -586,7 +586,7 @@ class _LLMClient(BaseModel):
         return_completions_only: bool = False,
         show_progress: bool = True,
         tools: list[Tool | dict | MCPServer] | None = None,
-        output_schema: dict | None = None,
+        output_schema: type[BaseModel] | dict | None = None,
         cache: CachePattern | None = None,
         service_tier: Literal["auto", "default", "flex", "priority"] | None = None,
     ) -> list[APIResponse] | list[str | None] | dict[str, int]:
@@ -661,7 +661,7 @@ class _LLMClient(BaseModel):
         return_completions_only: bool = False,
         show_progress=True,
         tools: list[Tool | dict | MCPServer] | None = None,
-        output_schema: dict | None = None,
+        output_schema: type[BaseModel] | dict | None = None,
         cache: CachePattern | None = None,
     ):
         return asyncio.run(
@@ -694,7 +694,7 @@ class _LLMClient(BaseModel):
         prompt: Prompt,
         *,
         tools: list[Tool | dict | MCPServer] | None = None,
-        output_schema: dict | None = None,
+        output_schema: type[BaseModel] | dict | None = None,
         cache: CachePattern | None = None,
         service_tier: Literal["auto", "default", "flex", "priority"] | None = None,
     ) -> int:
@@ -731,7 +731,7 @@ class _LLMClient(BaseModel):
         prompt: Prompt,
         *,
         tools: list[Tool | dict | MCPServer] | None = None,
-        output_schema: dict | None = None,
+        output_schema: type[BaseModel] | dict | None = None,
         cache: CachePattern | None = None,
         service_tier: Literal["auto", "default", "flex", "priority"] | None = None,
     ) -> APIResponse:
```
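Taken together, the widened overloads mean `output_schema` now accepts a Pydantic model class anywhere a JSON schema dict was accepted before. A minimal usage sketch, assuming the package's existing `LLMClient` export and `process_prompts_sync` method (neither is shown in this diff) and an illustrative `Verdict` model:

```python
from pydantic import BaseModel
from lm_deluge import LLMClient  # assumed public export, unchanged in this release

class Verdict(BaseModel):  # illustrative model, not part of the package
    label: str
    confidence: float

client = LLMClient("gpt-4o-mini")  # model name is an example
responses = client.process_prompts_sync(
    ["Classify the sentiment: 'the service was great'"],
    output_schema=Verdict,  # 0.0.74 required a dict JSON schema here
)
```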
src/lm_deluge/request_context.py (+5 -2)

```diff
@@ -1,11 +1,14 @@
 from dataclasses import dataclass, field
 from functools import cached_property
-from typing import Any, Callable
+from typing import Any, Callable, TYPE_CHECKING
 
 from .config import SamplingParams
 from .prompt import CachePattern, Conversation
 from .tracker import StatusTracker
 
+if TYPE_CHECKING:
+    from pydantic import BaseModel
+
 
 @dataclass
 class RequestContext:
@@ -32,7 +35,7 @@ class RequestContext:
 
     # Optional features
     tools: list | None = None
-    output_schema: dict | None = None
+    output_schema: "type[BaseModel] | dict | None" = None
     cache: CachePattern | None = None
     use_responses_api: bool = False
     background: bool = False
```
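The quoted annotation is deliberate: `pydantic` is imported only under `TYPE_CHECKING`, so `BaseModel` does not exist at runtime and the field type must stay a string that is never evaluated. A self-contained sketch of the pattern (the `Example` class is a stand-in, not the package's code):

```python
from dataclasses import dataclass
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Type checkers see the real class; the runtime never imports pydantic.
    from pydantic import BaseModel

@dataclass
class Example:  # illustrative stand-in for RequestContext
    # @dataclass stores this annotation as a string and never evaluates it,
    # so the module imports cleanly even when pydantic is not installed.
    output_schema: "type[BaseModel] | dict | None" = None
```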
src/lm_deluge/util/schema.py (new file, +412 lines)

```python
"""Schema transformation utilities for structured outputs.

This module provides utilities for transforming Pydantic models and JSON schemas
to be compatible with provider-specific structured output requirements (OpenAI, Anthropic).

Key functions:
- to_strict_json_schema: Convert Pydantic model to strict JSON schema
- transform_schema_for_openai: Apply OpenAI-specific transformations
- transform_schema_for_anthropic: Apply Anthropic-specific transformations
"""

from __future__ import annotations

import copy
import inspect
from typing import Any, TypeGuard, TYPE_CHECKING, Type

if TYPE_CHECKING:
    from pydantic import BaseModel

try:
    import pydantic
    from pydantic import BaseModel as _BaseModel
except ImportError:
    pydantic = None
    _BaseModel = None  # type: ignore


def is_pydantic_model(obj: Any) -> bool:
    """Check if an object is a Pydantic model class."""
    if pydantic is None or _BaseModel is None:
        return False
    return inspect.isclass(obj) and issubclass(obj, _BaseModel)


def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
    """Type guard for dictionaries."""
    return isinstance(obj, dict)


def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
    """Check if a dictionary has more than n keys."""
    i = 0
    for _ in obj.keys():
        i += 1
        if i > n:
            return True
    return False


def resolve_ref(*, root: dict[str, object], ref: str) -> object:
    """Resolve a JSON Schema $ref pointer.

    Args:
        root: The root schema object
        ref: The $ref string (e.g., "#/$defs/MyType")

    Returns:
        The resolved schema object

    Raises:
        ValueError: If the $ref format is invalid or cannot be resolved
    """
    if not ref.startswith("#/"):
        raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/")

    path = ref[2:].split("/")
    resolved = root
    for key in path:
        value = resolved[key]
        if not is_dict(value):
            raise ValueError(
                f"Encountered non-dictionary entry while resolving {ref} - {resolved}"
            )
        resolved = value

    return resolved


def to_strict_json_schema(model: Type["BaseModel"]) -> dict[str, Any]:
    """Convert a Pydantic model to a strict JSON schema.

    This function extracts the JSON schema from a Pydantic model and ensures
    it conforms to the strict mode requirements for structured outputs.

    Args:
        model: A Pydantic BaseModel class

    Returns:
        A JSON schema dict that conforms to strict mode requirements

    Raises:
        TypeError: If the model is not a Pydantic BaseModel
        ImportError: If pydantic is not installed
    """
    if pydantic is None or _BaseModel is None:
        raise ImportError(
            "pydantic is required for Pydantic model support. "
            "Install it with: pip install pydantic"
        )

    if not is_pydantic_model(model):
        raise TypeError(
            f"Expected a Pydantic BaseModel class, got {type(model).__name__}"
        )

    schema = model.model_json_schema()
    return _ensure_strict_json_schema(schema, path=(), root=schema)


def prepare_output_schema(
    schema_obj: Type["BaseModel"] | dict[str, Any],
) -> dict[str, Any]:
    """Normalize a user-provided schema into strict JSON schema form.

    Args:
        schema_obj: Either a Pydantic BaseModel subclass or a JSON schema dict.

    Returns:
        A strict JSON schema suitable for provider-specific transformation.

    Notes:
        Dict schemas are deep-copied before normalization so the caller's
        original object is left untouched.
    """

    if is_pydantic_model(schema_obj):
        return to_strict_json_schema(schema_obj)  # type: ignore[arg-type]

    if is_dict(schema_obj):
        schema_copy = copy.deepcopy(schema_obj)
        return _ensure_strict_json_schema(
            schema_copy,
            path=(),
            root=schema_copy,
        )

    raise TypeError(
        "output_schema must be a Pydantic BaseModel subclass or a JSON schema dict"
    )


def _ensure_strict_json_schema(
    json_schema: object,
    *,
    path: tuple[str, ...],
    root: dict[str, object],
) -> dict[str, Any]:
    """Recursively ensure a JSON schema conforms to strict mode requirements.

    This function:
    - Adds additionalProperties: false to all objects
    - Makes all properties required
    - Removes unsupported constraints and adds them to descriptions
    - Expands $refs that are mixed with other properties
    - Processes $defs, anyOf, allOf, etc.

    Args:
        json_schema: The schema to transform
        path: Current path in the schema (for error messages)
        root: The root schema (for resolving $refs)

    Returns:
        The transformed schema
    """
    if not is_dict(json_schema):
        raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")

    # Process $defs recursively
    defs = json_schema.get("$defs")
    if is_dict(defs):
        for def_name, def_schema in defs.items():
            _ensure_strict_json_schema(
                def_schema, path=(*path, "$defs", def_name), root=root
            )

    # Process definitions recursively
    definitions = json_schema.get("definitions")
    if is_dict(definitions):
        for definition_name, definition_schema in definitions.items():
            _ensure_strict_json_schema(
                definition_schema,
                path=(*path, "definitions", definition_name),
                root=root,
            )

    typ = json_schema.get("type")

    # Object types - add additionalProperties: false and make all fields required
    if typ == "object" and "additionalProperties" not in json_schema:
        json_schema["additionalProperties"] = False

    properties = json_schema.get("properties")
    if is_dict(properties):
        # Make all properties required
        json_schema["required"] = list(properties.keys())

        # Process each property recursively
        json_schema["properties"] = {
            key: _ensure_strict_json_schema(
                prop_schema, path=(*path, "properties", key), root=root
            )
            for key, prop_schema in properties.items()
        }

    # Arrays - process items schema
    items = json_schema.get("items")
    if is_dict(items):
        json_schema["items"] = _ensure_strict_json_schema(
            items, path=(*path, "items"), root=root
        )

    # Unions - process each variant
    any_of = json_schema.get("anyOf")
    if isinstance(any_of, list):
        json_schema["anyOf"] = [
            _ensure_strict_json_schema(
                variant, path=(*path, "anyOf", str(i)), root=root
            )
            for i, variant in enumerate(any_of)
        ]

    # Intersections - process each entry
    all_of = json_schema.get("allOf")
    if isinstance(all_of, list):
        if len(all_of) == 1:
            # Flatten single-element allOf
            json_schema.update(
                _ensure_strict_json_schema(
                    all_of[0], path=(*path, "allOf", "0"), root=root
                )
            )
            json_schema.pop("allOf")
        else:
            json_schema["allOf"] = [
                _ensure_strict_json_schema(
                    entry, path=(*path, "allOf", str(i)), root=root
                )
                for i, entry in enumerate(all_of)
            ]

    # Remove None defaults (redundant with nullable)
    if "default" in json_schema and json_schema["default"] is None:
        json_schema.pop("default")

    # Expand $refs that are mixed with other properties
    ref = json_schema.get("$ref")
    if ref and has_more_than_n_keys(json_schema, 1):
        if not isinstance(ref, str):
            raise ValueError(f"Received non-string $ref - {ref}")

        resolved = resolve_ref(root=root, ref=ref)
        if not is_dict(resolved):
            raise ValueError(
                f"Expected `$ref: {ref}` to resolve to a dictionary but got {resolved}"
            )

        # Properties from json_schema take priority over $ref
        json_schema.update({**resolved, **json_schema})
        json_schema.pop("$ref")

        # Re-process the expanded schema
        return _ensure_strict_json_schema(json_schema, path=path, root=root)

    return json_schema


def _move_constraints_to_description(
    json_schema: dict[str, Any],
    constraint_keys: list[str],
) -> dict[str, Any]:
    """Move unsupported constraints to the description field.

    This helps the model follow constraints even when they can't be enforced
    by the grammar.

    Args:
        json_schema: The schema to modify
        constraint_keys: List of constraint keys to move to description

    Returns:
        The modified schema
    """
    constraints_found = {}

    for key in constraint_keys:
        if key in json_schema:
            constraints_found[key] = json_schema.pop(key)

    if constraints_found:
        description = json_schema.get("description", "")
        constraint_str = ", ".join(
            f"{key}: {value}" for key, value in constraints_found.items()
        )

        if description:
            json_schema["description"] = f"{description}\n\n{{{constraint_str}}}"
        else:
            json_schema["description"] = f"{{{constraint_str}}}"

    return json_schema


def transform_schema_for_openai(schema: dict[str, Any]) -> dict[str, Any]:
    """Return a deep copy of the schema for OpenAI requests.

    OpenAI Structured Outputs currently support the standard constraints we
    rely on (min/max length, numeric bounds, etc.), so we intentionally leave
    the schema untouched apart from copying it to prevent downstream mutation.
    """

    return copy.deepcopy(schema)


def _transform_schema_recursive_anthropic(
    json_schema: dict[str, Any],
    root: dict[str, Any],
) -> dict[str, Any]:
    """Recursively strip unsupported constraints for Anthropic."""
    if not is_dict(json_schema):
        return json_schema

    # Process $defs
    if "$defs" in json_schema and is_dict(json_schema["$defs"]):
        for def_name, def_schema in json_schema["$defs"].items():
            if is_dict(def_schema):
                _transform_schema_recursive_anthropic(def_schema, root)

    # Process definitions
    if "definitions" in json_schema and is_dict(json_schema["definitions"]):
        for def_name, def_schema in json_schema["definitions"].items():
            if is_dict(def_schema):
                _transform_schema_recursive_anthropic(def_schema, root)

    typ = json_schema.get("type")

    # Handle unsupported constraints based on type
    if typ == "string":
        _move_constraints_to_description(
            json_schema,
            ["minLength", "maxLength", "pattern"],
        )
    elif typ in ("number", "integer"):
        _move_constraints_to_description(
            json_schema,
            [
                "minimum",
                "maximum",
                "exclusiveMinimum",
                "exclusiveMaximum",
                "multipleOf",
            ],
        )
    elif typ == "array":
        _move_constraints_to_description(
            json_schema,
            [
                "minItems",
                "maxItems",
            ],
        )

    # Recursively process nested schemas
    if "properties" in json_schema and is_dict(json_schema["properties"]):
        for prop_name, prop_schema in json_schema["properties"].items():
            if is_dict(prop_schema):
                _transform_schema_recursive_anthropic(prop_schema, root)

    if "items" in json_schema and is_dict(json_schema["items"]):
        _transform_schema_recursive_anthropic(json_schema["items"], root)

    if "anyOf" in json_schema and isinstance(json_schema["anyOf"], list):
        for variant in json_schema["anyOf"]:
            if is_dict(variant):
                _transform_schema_recursive_anthropic(variant, root)

    if "allOf" in json_schema and isinstance(json_schema["allOf"], list):
        for entry in json_schema["allOf"]:
            if is_dict(entry):
                _transform_schema_recursive_anthropic(entry, root)

    return json_schema


def transform_schema_for_anthropic(schema: dict[str, Any]) -> dict[str, Any]:
    """Transform a JSON schema for Anthropic's structured output requirements."""

    schema_copy = copy.deepcopy(schema)
    return _transform_schema_recursive_anthropic(schema_copy, schema_copy)


def get_json_schema(obj: Type["BaseModel"] | dict[str, Any]) -> dict[str, Any]:
    """Get JSON schema from a Pydantic model or dict.

    This is a convenience function that handles both Pydantic models
    and raw dictionaries.

    Args:
        obj: Either a Pydantic BaseModel class or a dict

    Returns:
        The JSON schema dict
    """
    if is_pydantic_model(obj):
        # Type narrowing: if is_pydantic_model returns True, obj must have model_json_schema
        return obj.model_json_schema()  # type: ignore
    elif is_dict(obj):
        return obj  # type: ignore
    else:
        raise TypeError(
            f"Expected Pydantic BaseModel or dict, got {type(obj).__name__}"
        )
```
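The practical entry point is `prepare_output_schema`, which accepts either input form and returns a strict-mode schema. A quick sketch of the strictification guarantees, using an illustrative model (assumes pydantic v2 and its declaration-order property output):

```python
from pydantic import BaseModel
from lm_deluge.util.schema import prepare_output_schema

class Person(BaseModel):  # illustrative model, not part of the package
    name: str
    age: int | None = None

strict = prepare_output_schema(Person)
assert strict["additionalProperties"] is False  # objects are closed
assert strict["required"] == ["name", "age"]    # every property becomes required
# The `default: None` on `age` is dropped as redundant with the null variant.
```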