nous-genai 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. nous/__init__.py +3 -0
  2. nous/genai/__init__.py +56 -0
  3. nous/genai/__main__.py +3 -0
  4. nous/genai/_internal/__init__.py +1 -0
  5. nous/genai/_internal/capability_rules.py +476 -0
  6. nous/genai/_internal/config.py +102 -0
  7. nous/genai/_internal/errors.py +63 -0
  8. nous/genai/_internal/http.py +951 -0
  9. nous/genai/_internal/json_schema.py +54 -0
  10. nous/genai/cli.py +1316 -0
  11. nous/genai/client.py +719 -0
  12. nous/genai/mcp_cli.py +275 -0
  13. nous/genai/mcp_server.py +1080 -0
  14. nous/genai/providers/__init__.py +15 -0
  15. nous/genai/providers/aliyun.py +535 -0
  16. nous/genai/providers/anthropic.py +483 -0
  17. nous/genai/providers/gemini.py +1606 -0
  18. nous/genai/providers/openai.py +1909 -0
  19. nous/genai/providers/tuzi.py +1158 -0
  20. nous/genai/providers/volcengine.py +273 -0
  21. nous/genai/reference/__init__.py +17 -0
  22. nous/genai/reference/catalog.py +206 -0
  23. nous/genai/reference/mappings.py +467 -0
  24. nous/genai/reference/mode_overrides.py +26 -0
  25. nous/genai/reference/model_catalog.py +82 -0
  26. nous/genai/reference/model_catalog_data/__init__.py +1 -0
  27. nous/genai/reference/model_catalog_data/aliyun.py +98 -0
  28. nous/genai/reference/model_catalog_data/anthropic.py +10 -0
  29. nous/genai/reference/model_catalog_data/google.py +45 -0
  30. nous/genai/reference/model_catalog_data/openai.py +44 -0
  31. nous/genai/reference/model_catalog_data/tuzi_anthropic.py +21 -0
  32. nous/genai/reference/model_catalog_data/tuzi_google.py +19 -0
  33. nous/genai/reference/model_catalog_data/tuzi_openai.py +75 -0
  34. nous/genai/reference/model_catalog_data/tuzi_web.py +136 -0
  35. nous/genai/reference/model_catalog_data/volcengine.py +107 -0
  36. nous/genai/tools/__init__.py +13 -0
  37. nous/genai/tools/output_parser.py +119 -0
  38. nous/genai/types.py +416 -0
  39. nous/py.typed +1 -0
  40. nous_genai-0.1.0.dist-info/METADATA +200 -0
  41. nous_genai-0.1.0.dist-info/RECORD +45 -0
  42. nous_genai-0.1.0.dist-info/WHEEL +5 -0
  43. nous_genai-0.1.0.dist-info/entry_points.txt +4 -0
  44. nous_genai-0.1.0.dist-info/licenses/LICENSE +190 -0
  45. nous_genai-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,45 @@
1
"""Static catalog of model IDs for the Google (Gemini API) provider."""

from __future__ import annotations

# Known model identifiers for this provider; order preserved from upstream.
# Section groupings below are inferred from the ID names — confirm against
# the Gemini API model list before relying on them.
MODELS: list[str] = [
    # Chat / text models (Gemini, Gemma open models, special-purpose IDs).
    "aqa",
    "deep-research-pro-preview-12-2025",
    "gemini-2.0-flash",
    "gemini-2.0-flash-lite",
    "gemini-2.5-computer-use-preview-10-2025",
    "gemini-2.5-flash",
    "gemini-2.5-flash-lite",
    "gemini-2.5-flash-native-audio-latest",
    "gemini-2.5-pro",
    "gemini-3-flash-preview",
    "gemini-3-pro-preview",
    "gemini-exp-1206",
    "gemini-flash-latest",
    "gemini-flash-lite-latest",
    "gemini-pro-latest",
    "gemini-robotics-er-1.5-preview",
    "gemma-3-12b-it",
    "gemma-3-1b-it",
    "gemma-3-27b-it",
    "gemma-3-4b-it",
    "gemma-3n-e2b-it",
    "gemma-3n-e4b-it",
    # Image generation (Gemini image + Imagen families).
    "gemini-2.5-flash-image",
    "gemini-3-pro-image-preview",
    "imagen-4.0-fast-generate-001",
    "imagen-4.0-generate-001",
    "imagen-4.0-generate-preview-06-06",
    "imagen-4.0-ultra-generate-001",
    # Video generation (Veo).
    "veo-2.0-generate-001",
    "veo-3.0-fast-generate-001",
    "veo-3.0-generate-001",
    "veo-3.1-fast-generate-preview",
    "veo-3.1-generate-preview",
    # Text-to-speech previews.
    "gemini-2.5-flash-preview-tts",
    "gemini-2.5-pro-preview-tts",
    # Embedding models.
    "embedding-001",
    "embedding-gecko-001",
    "gemini-embedding-001",
    "gemini-embedding-exp",
    "gemini-embedding-exp-03-07",
    "text-embedding-004",
]
@@ -0,0 +1,44 @@
1
"""Static catalog of model IDs for the OpenAI provider."""

from __future__ import annotations

# Known model identifiers for this provider; order preserved from upstream.
# Section groupings below are inferred from the ID names — verify against
# the OpenAI models documentation.
MODELS: list[str] = [
    # Chat / reasoning models.
    "gpt-4.1",
    "gpt-4.1-mini",
    "gpt-4.1-nano",
    "gpt-4o",
    "gpt-4o-mini",
    "chatgpt-4o-latest",
    "gpt-5",
    "gpt-5-codex",
    "gpt-5-mini",
    "gpt-5-nano",
    "gpt-5-pro",
    "gpt-5.1",
    "gpt-5.1-codex",
    "gpt-5.1-codex-max",
    "gpt-5.1-codex-mini",
    "gpt-5.2",
    "gpt-5.2-pro",
    "o1",
    "o1-pro",
    "o3",
    "o3-mini",
    "o4-mini",
    # Speech-to-text (transcription).
    "gpt-4o-mini-transcribe",
    "gpt-4o-transcribe",
    "gpt-4o-transcribe-diarize",
    "whisper-1",
    # Image generation.
    "dall-e-2",
    "dall-e-3",
    "gpt-image-1",
    "gpt-image-1-mini",
    "gpt-image-1.5",
    "chatgpt-image-latest",
    # Video generation.
    "sora-2",
    "sora-2-pro",
    # Text-to-speech.
    "gpt-4o-mini-tts",
    "tts-1",
    "tts-1-hd",
    # Embedding models.
    "text-embedding-3-large",
    "text-embedding-3-small",
    "text-embedding-ada-002",
]
@@ -0,0 +1,21 @@
1
"""Static catalog of model IDs for the tuzi Anthropic-compatible endpoint."""

from __future__ import annotations

# Claude-family chat models (latest aliases plus dated/"thinking" variants).
_CLAUDE_MODELS = [
    "claude-instant-1.2",
    "claude-3-5-haiku-latest",
    "claude-3-5-sonnet-latest",
    "claude-3-7-sonnet-latest",
    "claude-3-7-sonnet-latest-thinking",
    "claude-sonnet-4-0",
    "claude-sonnet-4-0-thinking",
    "claude-opus-4-0",
    "claude-opus-4-0-thinking",
    "claude-opus-4-1",
    "claude-haiku-4-5",
    "claude-sonnet-4-5",
    "claude-opus-4-5",
]

# "Cursor-*" routing aliases — presumably proxy names for Claude backends;
# TODO(review): confirm against the upstream tuzi catalog.
_CURSOR_ALIASES = [
    "Cursor-c4",
    "Cursor-c4-thinking",
    "Cursor-co4",
    "Cursor-co4-thinking",
]

# Flat catalog consumed by the reference layer; order matches upstream.
MODELS: list[str] = [*_CLAUDE_MODELS, *_CURSOR_ALIASES]
@@ -0,0 +1,19 @@
1
"""Static catalog of model IDs for the tuzi Google (Gemini-compatible) endpoint."""

from __future__ import annotations

# Text/chat models, including search- and no-thinking variants.
_TEXT_MODELS = [
    "gemini-2.5-flash",
    "gemini-2.5-flash-lite",
    "gemini-2.5-flash-nothink",
    "gemini-2.5-flash-search",
    "gemini-2.5-pro",
    "gemini-2.5-pro-search",
    "gemini-3-flash-preview",
    "gemini-3-pro-preview",
    "gemini-3-pro-preview-search",
]

# Image-generation models.
_IMAGE_MODELS = [
    "gemini-2.0-flash-exp-image-generation",
    "gemini-2.0-flash-preview-image-generation",
    "gemini-2.5-flash-image",
    "gemini-3-pro-image-preview",
]

# Embedding models.
_EMBEDDING_MODELS = [
    "gemini-embedding-001",
    "gemini-embedding-exp-03-07",
]

# Flat catalog consumed by the reference layer; order matches upstream.
MODELS: list[str] = [*_TEXT_MODELS, *_IMAGE_MODELS, *_EMBEDDING_MODELS]
@@ -0,0 +1,75 @@
1
"""Static catalog of model IDs for the tuzi OpenAI-compatible endpoint."""

from __future__ import annotations

# Known model identifiers for this provider; order preserved from upstream.
# Casing oddities ("GPT-OSS-20B") and near-duplicates ("gpt-oss-120b" vs
# "openai/gpt-oss-120b") are kept verbatim — presumably distinct routing
# names on the proxy; TODO(review): confirm against the upstream catalog.
MODELS: list[str] = [
    # Chat / reasoning models.
    "computer-use-preview",
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-instruct",
    "gpt-4",
    "gpt-4-32k",
    "gpt-4-turbo",
    "gpt-4-vision-preview",
    "gpt-4.1",
    "gpt-4.1-mini",
    "gpt-4.1-nano",
    "gpt-4.5-preview",
    "gpt-4o",
    "gpt-4o-audio-preview",
    "gpt-4o-mini",
    "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-realtime-preview",
    "gpt-4o-mini-search-preview",
    "gpt-4o-realtime-preview",
    "gpt-4o-search-preview",
    "gpt-5",
    "gpt-5-codex",
    "gpt-5-mini",
    "gpt-5-nano",
    "gpt-5-pro",
    "gpt-5.1",
    "gpt-5.1-codex",
    "gpt-5.1-codex-max",
    "gpt-5.1-codex-mini",
    "gpt-5.2",
    "gpt-5.2-pro",
    "gpt-oss-120b",
    "GPT-OSS-20B",
    "MiniMax-Text-01",
    "o1",
    "o1-mini",
    "o1-pro",
    "o3",
    "o3-mini",
    "o3-mini-high",
    "o3-mini-low",
    "o3-mini-medium",
    "o3-pro",
    "o4-mini",
    "openai/gpt-oss-120b",
    "openai/gpt-oss-20b",
    # Speech-to-text (transcription).
    "distil-whisper-large-v3-en",
    "gpt-4o-mini-transcribe",
    "gpt-4o-transcribe",
    "gpt-4o-transcribe-diarize",
    "whisper-1",
    "whisper-large-v3",
    "whisper-large-v3-turbo",
    # Image generation.
    "dall-e-2",
    "dall-e-3",
    "FLUX.1-Kontext-pro",
    "gpt-4o-image",
    "gpt-image-1",
    "gpt-image-1.5",
    # Video generation.
    "sora-2",
    "sora-2-pro",
    # Text-to-speech.
    "gpt-4o-mini-tts",
    "tts-1",
    "tts-1-hd",
    # Embedding models.
    "doubao-embedding-large-text-240915",
    "doubao-embedding-text-240715",
    "text-embedding-004",
    "text-embedding-3-large",
    "text-embedding-3-small",
    "text-embedding-ada-002",
    "text-embedding-v4",
]
@@ -0,0 +1,136 @@
1
"""Static catalog of model IDs for the tuzi web (multi-vendor) endpoint."""

from __future__ import annotations

# Known model identifiers for this provider; order preserved from upstream.
# The endpoint aggregates many vendors (Anthropic, DeepSeek, ByteDance,
# Google, Zhipu, OpenAI, xAI, Moonshot, Alibaba, Kling, Suno, Perplexity);
# groupings below are inferred from the ID names.
MODELS: list[str] = [
    # Chat / reasoning models across vendors.
    "claude-haiku-4-5-20251001",
    "claude-haiku-4-5-20251001-thinking",
    "claude-sonnet-4-5-20250929",
    "claude-sonnet-4-5-20250929-thinking",
    "claude-opus-4-5-20251101",
    "claude-opus-4-5-20251101-thinking",
    "deepseek-chat",
    "deepseek-coder",
    "deepseek-r1",
    "deepseek-r1-all",
    "deepseek-r1-search",
    "deepseek-r1-search-pro",
    "deepseek-r1-search-pro-thinking",
    "deepseek-search",
    "deepseek-v3",
    "deepseek-v3.2",
    "deepseek-v3.2-exp-thinking",
    "deepseek-v3.2-thinking",
    "doubao-seed-1-8-251228",
    "doubao-seed-1-8-251228-thinking",
    "gemini-2.5-flash",
    "gemini-2.5-flash-deepsearch",
    "gemini-2.5-pro",
    "gemini-2.5-pro-deepsearch",
    "gemini-3-flash-preview",
    "gemini-3-flash-preview-all",
    "gemini-3-flash-preview-thinking",
    "gemini-3-pro",
    "gemini-3-pro-deepsearch",
    "gemini-3-pro-preview",
    "gemini-3-pro-preview-thinking",
    "gemini-3-pro-thinking",
    "glm-4.5",
    "glm-4.5-air",
    "glm-4.5-airx",
    "glm-4.5-flash",
    "glm-4.5-x",
    "glm-4v",
    "gpt-4o",
    "gpt-4o-mini",
    "gpt-4o-search",
    "gpt-4o-study",
    "gpt-5",
    "gpt-5-mini",
    "gpt-5-nano",
    "gpt-5-pro",
    "gpt-5-thinking-all",
    "gpt-5.1",
    "gpt-5.1-thinking",
    "gpt-5.1-thinking-all",
    "gpt-5.2",
    "gpt-oss-120b",
    "gpt-oss-20b",
    "grok-3",
    "grok-3-deepersearch",
    "grok-3-deepsearch",
    "grok-3-reasoner",
    "grok-3-think",
    "grok-4",
    "grok-4.1",
    "kimi-k2-instruct",
    "o1",
    "o1-mini",
    "o1-pro",
    "o1-pro-all",
    "o3",
    "o3-all",
    "o3-mini",
    "o3-mini-high",
    "o3-pro",
    "o4-mini",
    "qwen2.5-72b-instruct",
    "qwen3-235b-a22b",
    "qwen3-32b",
    "search",
    "search-gpts",
    "search-gpts-chat",
    # Image generation.
    "dall-e-3",
    "doubao-seedream-4-0-250828",
    "doubao-seedream-4-5-251128",
    "flux-kontext-max",
    "flux-kontext-pro",
    "gemini-2.5-flash-image",
    "gemini-3-pro-image-preview",
    "gemini-3-pro-image-preview-2k",
    "gemini-3-pro-image-preview-4k",
    "gpt-image-1",
    "gpt-image-1.5",
    "kling_image",
    "seededit",
    "seedream-3.0",
    "seedream-4-0-250828",
    "seedream-v4",
    # Video generation.
    "kling-video-o1",
    "kling-video-o1-edit",
    "kling_video",
    "sora-16:9-480p-10s",
    "sora-16:9-480p-5s",
    "sora-16:9-720p-5s",
    "sora-1:1-480p-10s",
    "sora-1:1-480p-5s",
    "sora-1:1-720p-5s",
    "sora-2",
    "sora-2-character",
    "sora-2-pro",
    "sora-2-pro-character",
    # NOTE(review): "-10" below lacks the "s" suffix its siblings carry —
    # kept verbatim; presumably the upstream name, verify before "fixing".
    "sora-9:16-480p-10",
    "sora-9:16-480p-5s",
    "sora-9:16-720p-5s",
    "veo2",
    "veo2-fast",
    "veo2-fast-frames",
    "veo2-pro",
    "veo3",
    "veo3-fast",
    "veo3-fast-frames",
    "veo3-frames",
    "veo3-pro",
    "veo3-pro-frames",
    "veo3.1",
    "veo3.1-components",
    "veo3.1-pro",
    # Speech / music generation.
    "chirp-auk",
    "chirp-bluejay",
    "chirp-crow",
    "chirp-v4",
    "suno-v3",
    "suno_lyrics",
    # Sonar (search-augmented chat) models.
    "sonar-medium-chat",
    "sonar-medium-online",
    "sonar-small-chat",
    "sonar-small-online",
]
@@ -0,0 +1,107 @@
1
"""Static catalog of model IDs for the Volcengine (Ark) provider."""

from __future__ import annotations

# Known model identifiers for this provider; order preserved from upstream.
# Most IDs carry a YYMMDD snapshot suffix. Groupings below are inferred
# from the names; note vision-embedding IDs sit inside the chat section.
MODELS: list[str] = [
    # Chat / reasoning / vision models (plus doubao-embedding-vision-*).
    "deepseek-r1-250120",
    "deepseek-r1-250528",
    "deepseek-r1-distill-qwen-32b-250120",
    "deepseek-r1-distill-qwen-7b-250120",
    "deepseek-v3-1-250821",
    "deepseek-v3-1-terminus",
    "deepseek-v3-2-251201",
    "deepseek-v3-241226",
    "deepseek-v3-250324",
    "doubao-1-5-lite-32k-250115",
    "doubao-1-5-pro-256k-250115",
    "doubao-1-5-pro-32k-250115",
    "doubao-1-5-pro-32k-character-250228",
    "doubao-1-5-pro-32k-character-250715",
    "doubao-1-5-thinking-pro-250415",
    "doubao-1-5-thinking-pro-m-250415",
    "doubao-1-5-thinking-pro-m-250428",
    "doubao-1-5-thinking-vision-pro-250428",
    "doubao-1-5-ui-tars-250428",
    "doubao-1-5-vision-pro-32k-250115",
    "doubao-1.5-ui-tars-250328",
    "doubao-1.5-vision-lite-250315",
    "doubao-1.5-vision-pro-250328",
    "doubao-embedding-vision-241215",
    "doubao-embedding-vision-250328",
    "doubao-embedding-vision-250615",
    "doubao-embedding-vision-251215",
    "doubao-lite-128k-240428",
    "doubao-lite-128k-240828",
    "doubao-lite-32k-240428",
    "doubao-lite-32k-240628",
    "doubao-lite-32k-240828",
    "doubao-lite-32k-character-241015",
    "doubao-lite-32k-character-250228",
    "doubao-lite-4k-240328",
    "doubao-lite-4k-character-240515",
    "doubao-lite-4k-character-240828",
    "doubao-lite-4k-pretrain-character-240516",
    "doubao-pro-128k-240515",
    "doubao-pro-128k-240628",
    "doubao-pro-256k-241115",
    "doubao-pro-32k-240615",
    "doubao-pro-32k-240828",
    "doubao-pro-32k-241215",
    "doubao-pro-32k-browsing-240615",
    "doubao-pro-32k-browsing-240828",
    "doubao-pro-32k-browsing-241115",
    "doubao-pro-32k-character-240528",
    "doubao-pro-32k-character-240828",
    "doubao-pro-32k-character-241215",
    "doubao-pro-32k-functioncall-240515",
    "doubao-pro-32k-functioncall-240815",
    "doubao-pro-32k-functioncall-241028",
    "doubao-pro-32k-functioncall-preview",
    "doubao-pro-4k-240515",
    "doubao-pro-4k-browsing-240524",
    "doubao-pro-4k-character-240515",
    "doubao-pro-4k-character-240728",
    "doubao-pro-4k-functioncall-240515",
    "doubao-pro-4k-functioncall-240615",
    "doubao-seaweed-241128",
    "doubao-seed-1-6-250615",
    "doubao-seed-1-6-251015",
    "doubao-seed-1-6-flash-250615",
    "doubao-seed-1-6-flash-250715",
    "doubao-seed-1-6-flash-250828",
    "doubao-seed-1-6-lite-251015",
    "doubao-seed-1-6-thinking-250615",
    "doubao-seed-1-6-thinking-250715",
    "doubao-seed-1-6-vision-250815",
    "doubao-seed-1-8-251228",
    "doubao-seed-code-preview-251028",
    "doubao-seed-translation-250915",
    "doubao-seed3d-1-0-250928",
    "doubao-seedance-1-0-pro-250528",
    "doubao-seedance-1-0-pro-fast-251015",
    "doubao-seedance-1-5-pro-251215",
    "doubao-seededit-3-0-i2i-250628",
    "doubao-smart-router-250928",
    "doubao-vision-lite-32k-241015",
    "doubao-vision-pro-32k-241028",
    "glm-4-5-air-20250728",
    "kimi-k2-250711",
    "kimi-k2-250905",
    "kimi-k2-thinking-251104",
    "mistral-7b-instruct-v0.2",
    "qwen2-5-72b-20240919",
    "qwen3-0-6b-20250429",
    "qwen3-14b-20250429",
    "qwen3-32b-20250429",
    "qwen3-8b-20250429",
    "wan2-1-14b-flf2v-250417",
    "wan2-1-14b-i2v-250225",
    "wan2-1-14b-t2v-250225",
    # Image generation (Seedream).
    "doubao-seedream-3-0-t2i-250415",
    "doubao-seedream-4-0-250828",
    "doubao-seedream-4-5-251128",
    # Video generation (Seedance lite).
    "doubao-seedance-1-0-lite-i2v-250428",
    "doubao-seedance-1-0-lite-t2v-250428",
    # Text embedding models.
    "doubao-embedding-large-text-240915",
    "doubao-embedding-large-text-250515",
    "doubao-embedding-text-240515",
    "doubao-embedding-text-240715",
]
@@ -0,0 +1,13 @@
1
"""Public surface of `nous.genai.tools`: structured-output helper re-exports."""

from .output_parser import (
    DEFAULT_OUTPUT_PARSER_TOOL_NAME,
    build_output_parser_tool,
    extract_output_from_response,
    parse_output,
)

# Explicit public API, mirroring the names imported above.
__all__ = [
    "DEFAULT_OUTPUT_PARSER_TOOL_NAME",
    "build_output_parser_tool",
    "extract_output_from_response",
    "parse_output",
]
@@ -0,0 +1,119 @@
1
"""
Structured-output helpers built on top of generic tool calling.

A function tool with a single `"output"` argument is presented to the model;
the model "returns" structured data by calling that tool. This module builds
the tool, forces the call, and extracts the result from the response.
"""

from __future__ import annotations

from typing import Any

from .._internal.errors import invalid_request_error, not_supported_error
from .._internal.json_schema import normalize_json_schema
from ..client import Client
from ..types import (
    GenerateRequest,
    GenerateResponse,
    Message,
    OutputSpec,
    Part,
    Tool,
    ToolChoice,
)

# Default name of the synthetic structured-output function tool.
DEFAULT_OUTPUT_PARSER_TOOL_NAME = "nous_output_parser"

# The single argument key that wraps the caller's JSON schema.
_DEFAULT_WRAPPER_KEY = "output"
21
+
22
+
23
def build_output_parser_tool(
    json_schema: Any,
    *,
    name: str = DEFAULT_OUTPUT_PARSER_TOOL_NAME,
    description: str | None = None,
) -> Tool:
    """
    Create the provider-agnostic function tool used for "structured outputs".

    The model is expected to deliver its final answer by calling this tool
    with one argument, `"output"`, that conforms to *json_schema*.

    Raises an invalid-request error when *name* is empty or whitespace-only.
    """
    cleaned_name = name.strip()
    if not cleaned_name:
        raise invalid_request_error("output parser tool name must be non-empty")

    # Wrap the caller's schema in a single-key object so every provider sees
    # one uniform argument shape.
    wrapper_schema = {
        "type": "object",
        "properties": {_DEFAULT_WRAPPER_KEY: normalize_json_schema(json_schema)},
        "required": [_DEFAULT_WRAPPER_KEY],
        "additionalProperties": False,
    }

    # Use the caller's description when it is a non-blank string; otherwise
    # fall back to a default instruction.
    if isinstance(description, str) and description.strip():
        tool_description = description
    else:
        tool_description = (
            "Return the final result by calling this tool with a single JSON argument "
            f"`{_DEFAULT_WRAPPER_KEY}` matching the schema. Do not output extra text."
        )

    return Tool(
        name=cleaned_name,
        description=tool_description,
        parameters=wrapper_schema,
        strict=True,
    )
52
+
53
+
54
def extract_output_from_response(
    response: GenerateResponse,
    *,
    tool_name: str = DEFAULT_OUTPUT_PARSER_TOOL_NAME,
) -> Any:
    """
    Pull the structured output object out of *response*.

    Scans the output messages for the first `tool_call` part named
    *tool_name* and returns the value of its `"output"` argument.

    Raises an invalid-request error when *tool_name* is blank, when the
    matching call's arguments are not an object or lack the wrapper key, or
    when no matching tool call is present.
    """
    wanted = tool_name.strip()
    if not wanted:
        raise invalid_request_error("tool_name must be non-empty")

    # Walk every part of every output message, looking for the first match.
    all_parts = (part for message in response.output for part in message.content)
    for part in all_parts:
        if part.type != "tool_call" or part.meta.get("name") != wanted:
            continue
        arguments = part.meta.get("arguments")
        if not isinstance(arguments, dict):
            raise invalid_request_error(
                "output parser tool_call arguments must be an object"
            )
        if _DEFAULT_WRAPPER_KEY not in arguments:
            raise invalid_request_error(
                f"output parser tool_call missing '{_DEFAULT_WRAPPER_KEY}'"
            )
        return arguments[_DEFAULT_WRAPPER_KEY]

    raise invalid_request_error(f"missing output parser tool_call: {wanted}")
85
+
86
+
87
def parse_output(
    client: Client,
    *,
    model: str,
    text: str,
    json_schema: Any,
    tool_name: str = DEFAULT_OUTPUT_PARSER_TOOL_NAME,
) -> Any:
    """
    Turn free-form *text* into a structured object matching *json_schema*.

    Builds the output-parser tool, forces *model* to call it on *text* via a
    single non-streaming generate request, and returns the extracted
    `"output"` argument.

    Raises an invalid-request error for empty *text*, and a not-supported
    error when the model lacks tool support or the client returns a
    streaming response.
    """
    if not isinstance(text, str) or not text.strip():
        raise invalid_request_error("text must be a non-empty string")

    # Structured output rides on tool calling, so bail out early when the
    # target model cannot call tools at all.
    if not client.capabilities(model).supports_tools:
        raise not_supported_error("this model does not support tools")

    parser_tool = build_output_parser_tool(json_schema, name=tool_name)
    instruction = (
        "Convert the following text into structured output by calling the tool. "
        "Call the tool only; do not output any other text.\n\n" + text.strip()
    )
    request = GenerateRequest(
        model=model,
        input=[Message(role="user", content=[Part.from_text(instruction)])],
        output=OutputSpec(modalities=["text"]),
        tools=[parser_tool],
        # Force exactly this tool; a free-form text reply is not acceptable.
        tool_choice=ToolChoice(mode="tool", name=parser_tool.name),
    )

    response = client.generate(request)
    if not isinstance(response, GenerateResponse):
        raise not_supported_error("streaming responses are not supported")
    return extract_output_from_response(response, tool_name=parser_tool.name)