nous-genai 0.1.0 (nous_genai-0.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nous/__init__.py +3 -0
- nous/genai/__init__.py +56 -0
- nous/genai/__main__.py +3 -0
- nous/genai/_internal/__init__.py +1 -0
- nous/genai/_internal/capability_rules.py +476 -0
- nous/genai/_internal/config.py +102 -0
- nous/genai/_internal/errors.py +63 -0
- nous/genai/_internal/http.py +951 -0
- nous/genai/_internal/json_schema.py +54 -0
- nous/genai/cli.py +1316 -0
- nous/genai/client.py +719 -0
- nous/genai/mcp_cli.py +275 -0
- nous/genai/mcp_server.py +1080 -0
- nous/genai/providers/__init__.py +15 -0
- nous/genai/providers/aliyun.py +535 -0
- nous/genai/providers/anthropic.py +483 -0
- nous/genai/providers/gemini.py +1606 -0
- nous/genai/providers/openai.py +1909 -0
- nous/genai/providers/tuzi.py +1158 -0
- nous/genai/providers/volcengine.py +273 -0
- nous/genai/reference/__init__.py +17 -0
- nous/genai/reference/catalog.py +206 -0
- nous/genai/reference/mappings.py +467 -0
- nous/genai/reference/mode_overrides.py +26 -0
- nous/genai/reference/model_catalog.py +82 -0
- nous/genai/reference/model_catalog_data/__init__.py +1 -0
- nous/genai/reference/model_catalog_data/aliyun.py +98 -0
- nous/genai/reference/model_catalog_data/anthropic.py +10 -0
- nous/genai/reference/model_catalog_data/google.py +45 -0
- nous/genai/reference/model_catalog_data/openai.py +44 -0
- nous/genai/reference/model_catalog_data/tuzi_anthropic.py +21 -0
- nous/genai/reference/model_catalog_data/tuzi_google.py +19 -0
- nous/genai/reference/model_catalog_data/tuzi_openai.py +75 -0
- nous/genai/reference/model_catalog_data/tuzi_web.py +136 -0
- nous/genai/reference/model_catalog_data/volcengine.py +107 -0
- nous/genai/tools/__init__.py +13 -0
- nous/genai/tools/output_parser.py +119 -0
- nous/genai/types.py +416 -0
- nous/py.typed +1 -0
- nous_genai-0.1.0.dist-info/METADATA +200 -0
- nous_genai-0.1.0.dist-info/RECORD +45 -0
- nous_genai-0.1.0.dist-info/WHEEL +5 -0
- nous_genai-0.1.0.dist-info/entry_points.txt +4 -0
- nous_genai-0.1.0.dist-info/licenses/LICENSE +190 -0
- nous_genai-0.1.0.dist-info/top_level.txt +1 -0
nous/genai/providers/anthropic.py
@@ -0,0 +1,483 @@
from __future__ import annotations

import json
import os
from dataclasses import dataclass
from typing import Any, Iterator, Literal
from uuid import uuid4

from .._internal.capability_rules import claude_input_modalities
from .._internal.errors import (
    invalid_request_error,
    not_supported_error,
    provider_error,
)
from .._internal.http import download_to_tempfile, request_json, request_stream_json_sse
from ..types import (
    Capability,
    GenerateEvent,
    GenerateRequest,
    GenerateResponse,
    Message,
    Part,
    PartSourceBytes,
    PartSourcePath,
    PartSourceUrl,
    Usage,
    bytes_to_base64,
    detect_mime_type,
    file_to_bytes,
    normalize_reasoning_effort,
)


_ANTHROPIC_DEFAULT_BASE_URL = "https://api.anthropic.com"

_INLINE_BYTES_LIMIT = 20 * 1024 * 1024

_DEFAULT_VERSION = "2023-06-01"


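# Normalized reasoning-effort levels map onto Anthropic extended-thinking
# token budgets; "minimal" and "low" share the 1,024-token floor.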
_EFFORT_TO_THINKING_BUDGET_TOKENS: dict[str, int] = {
    "minimal": 1_024,
    "low": 1_024,
    "medium": 2_048,
    "high": 4_096,
    "xhigh": 8_192,
}


@dataclass(frozen=True, slots=True)
class AnthropicAdapter:
    api_key: str
    base_url: str = _ANTHROPIC_DEFAULT_BASE_URL
    provider_name: str = "anthropic"
    auth_mode: Literal["x-api-key", "bearer"] = "x-api-key"
    version: str = _DEFAULT_VERSION
    proxy_url: str | None = None

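    # Capabilities are derived locally from the model id rather than fetched:
    # input modalities come from claude_input_modalities(), output is text-only.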
    def capabilities(self, model_id: str) -> Capability:
        mid = model_id.strip()
        if not mid:
            raise invalid_request_error("model_id must not be empty")
        return Capability(
            input_modalities=claude_input_modalities(mid),
            output_modalities={"text"},
            supports_stream=True,
            supports_job=False,
            supports_tools=True,
            supports_json_schema=False,
        )

    def list_models(self, *, timeout_ms: int | None = None) -> list[str]:
        """
        Fetch remote model ids via Anthropic GET /v1/models.
        """
        url = f"{self.base_url.rstrip('/')}/v1/models"
        obj = request_json(
            method="GET",
            url=url,
            headers=self._headers(),
            timeout_ms=timeout_ms,
            proxy_url=self.proxy_url,
        )
        data = obj.get("data")
        if not isinstance(data, list):
            return []
        out: list[str] = []
        for item in data:
            if not isinstance(item, dict):
                continue
            mid = item.get("id")
            if isinstance(mid, str) and mid:
                out.append(mid)
        return sorted(set(out))

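    # Single entry point for both sync and streaming generation; unsupported
    # request features (JSON output, seed) are rejected up front.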
    def generate(
        self, request: GenerateRequest, *, stream: bool
    ) -> GenerateResponse | Iterator[GenerateEvent]:
        if set(request.output.modalities) != {"text"}:
            raise not_supported_error("Anthropic only supports text output in this SDK")
        if request.output.text and (
            request.output.text.format != "text"
            or request.output.text.json_schema is not None
        ):
            raise not_supported_error(
                "Anthropic json output is not supported in this SDK"
            )
        if request.params.seed is not None:
            raise not_supported_error("Anthropic does not support seed in this SDK")
        if stream:
            return self._messages_stream(request)
        return self._messages(request)

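    # Anthropic accepts either the x-api-key header (default) or a Bearer
    # token, selected via auth_mode; anthropic-version is always pinned.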
    def _headers(self, request: GenerateRequest | None = None) -> dict[str, str]:
        headers: dict[str, str] = {
            "Content-Type": "application/json",
            "anthropic-version": self.version,
        }
        if self.auth_mode == "bearer":
            headers["Authorization"] = f"Bearer {self.api_key}"
        else:
            headers["x-api-key"] = self.api_key
        if request and request.params.idempotency_key:
            headers["Idempotency-Key"] = request.params.idempotency_key
        return headers

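    # Non-streaming path: one POST to /v1/messages, parsed into a
    # GenerateResponse.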
    def _messages(self, request: GenerateRequest) -> GenerateResponse:
        url = f"{self.base_url.rstrip('/')}/v1/messages"
        body = self._messages_body(request, stream=False)
        obj = request_json(
            method="POST",
            url=url,
            headers=self._headers(request),
            json_body=body,
            timeout_ms=request.params.timeout_ms,
            proxy_url=self.proxy_url,
        )
        return self._parse_message(obj, model_id=request.model_id())

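    # Streaming path: consumes Anthropic SSE events, forwards only
    # content_block_delta/text_delta payloads, then emits a final "done" event.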
    def _messages_stream(self, request: GenerateRequest) -> Iterator[GenerateEvent]:
        url = f"{self.base_url.rstrip('/')}/v1/messages"
        body = self._messages_body(request, stream=True)
        events = request_stream_json_sse(
            method="POST",
            url=url,
            headers=self._headers(request),
            json_body=body,
            timeout_ms=request.params.timeout_ms,
            proxy_url=self.proxy_url,
        )

        def _iter() -> Iterator[GenerateEvent]:
            for obj in events:
                if isinstance(obj.get("data"), dict):
                    obj = obj["data"]
                if not isinstance(obj, dict):
                    continue
                if obj.get("type") == "content_block_delta":
                    delta = obj.get("delta")
                    if isinstance(delta, dict) and delta.get("type") == "text_delta":
                        text = delta.get("text")
                        if isinstance(text, str) and text:
                            yield GenerateEvent(
                                type="output.text.delta", data={"delta": text}
                            )
            yield GenerateEvent(type="done", data={})

        return _iter()

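    # Translates the provider-agnostic request into an Anthropic /v1/messages
    # body: role mapping and validation, tool wiring, sampling params, extended
    # thinking, and pass-through provider_options that may not shadow computed
    # fields.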
    def _messages_body(
        self, request: GenerateRequest, *, stream: bool
    ) -> dict[str, Any]:
        model_id = request.model_id()
        max_tokens = _max_tokens(request)
        system = _extract_system_text(request)

        messages: list[dict[str, Any]] = []
        for m in request.input:
            if m.role == "system":
                continue
            if m.role not in {"user", "assistant", "tool"}:
                raise not_supported_error(f"Anthropic does not support role: {m.role}")
            if m.role == "user" and any(p.type == "tool_result" for p in m.content):
                raise invalid_request_error(
                    "tool_result parts must be sent as role='tool' for Anthropic"
                )
            if m.role == "user" and any(p.type == "tool_call" for p in m.content):
                raise invalid_request_error(
                    "tool_call parts are only allowed in assistant messages"
                )
            if m.role == "assistant" and any(
                p.type == "tool_result" for p in m.content
            ):
                raise invalid_request_error(
                    "tool_result parts must be sent as role='tool' for Anthropic"
                )
            if m.role == "tool" and any(p.type != "tool_result" for p in m.content):
                raise invalid_request_error(
                    "tool messages may only contain tool_result parts"
                )
            blocks = [
                _part_to_block(
                    p, timeout_ms=request.params.timeout_ms, proxy_url=self.proxy_url
                )
                for p in m.content
            ]
            role = "user" if m.role == "tool" else m.role
            messages.append({"role": role, "content": blocks})

        if not messages:
            raise invalid_request_error(
                "request.input must contain at least one non-system message"
            )

        body: dict[str, Any] = {
            "model": model_id,
            "max_tokens": max_tokens,
            "messages": messages,
            "stream": stream,
        }
        if system:
            body["system"] = system

        params = request.params
        if params.temperature is not None:
            body["temperature"] = params.temperature
        if params.top_p is not None:
            body["top_p"] = params.top_p
        if params.stop is not None:
            body["stop_sequences"] = params.stop
        thinking = _thinking_param(request, max_tokens=max_tokens)
        if thinking is not None:
            body["thinking"] = thinking

        if request.tools:
            tools: list[dict[str, Any]] = []
            for t in request.tools:
                name = t.name.strip()
                if not name:
                    raise invalid_request_error("tool.name must be non-empty")
                tool_obj: dict[str, Any] = {
                    "name": name,
                    "input_schema": t.parameters
                    if t.parameters is not None
                    else {"type": "object"},
                }
                if isinstance(t.description, str) and t.description.strip():
                    tool_obj["description"] = t.description.strip()
                tools.append(tool_obj)
            body["tools"] = tools

        if request.tool_choice is not None:
            choice = request.tool_choice.normalized()
            if choice.mode in {"required", "tool"} and not request.tools:
                raise invalid_request_error("tool_choice requires request.tools")
            if choice.mode == "required":
                body["tool_choice"] = {"type": "any"}
            elif choice.mode == "tool":
                body["tool_choice"] = {"type": "tool", "name": choice.name}
            else:
                body["tool_choice"] = {"type": choice.mode}

        opts = request.provider_options.get(self.provider_name)
        if isinstance(opts, dict):
            for k, v in opts.items():
                if k in body:
                    raise invalid_request_error(
                        f"provider_options cannot override body.{k}"
                    )
                body[k] = v
        return body

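    # Maps Anthropic content blocks back to SDK parts (text and tool_use) and
    # derives a total token count only when both usage fields are integers.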
    def _parse_message(self, obj: dict[str, Any], *, model_id: str) -> GenerateResponse:
        if isinstance(obj.get("data"), dict):
            obj = obj["data"]
        content = obj.get("content")
        if not isinstance(content, list):
            raise provider_error("anthropic response missing content")
        parts: list[Part] = []
        for item in content:
            if not isinstance(item, dict):
                continue
            typ = item.get("type")
            if typ == "text":
                t = item.get("text")
                if isinstance(t, str):
                    parts.append(Part.from_text(t))
                continue
            if typ == "tool_use":
                tool_use_id = item.get("id")
                name = item.get("name")
                tool_input = item.get("input")
                if (
                    isinstance(tool_use_id, str)
                    and tool_use_id
                    and isinstance(name, str)
                    and name
                    and isinstance(tool_input, dict)
                ):
                    parts.append(
                        Part.tool_call(
                            tool_call_id=tool_use_id, name=name, arguments=tool_input
                        )
                    )

        usage_obj = obj.get("usage")
        usage = None
        if isinstance(usage_obj, dict):
            usage = Usage(
                input_tokens=usage_obj.get("input_tokens"),
                output_tokens=usage_obj.get("output_tokens"),
                total_tokens=usage_obj.get("input_tokens", 0)
                + usage_obj.get("output_tokens", 0)
                if isinstance(usage_obj.get("input_tokens"), int)
                and isinstance(usage_obj.get("output_tokens"), int)
                else None,
            )

        raw_id = obj.get("id")
        resp_id = raw_id if isinstance(raw_id, str) and raw_id else f"sdk_{uuid4().hex}"

        return GenerateResponse(
            id=resp_id,
            provider=self.provider_name,
            model=f"{self.provider_name}:{model_id}",
            status="completed",
            output=[
                Message(
                    role="assistant", content=parts if parts else [Part.from_text("")]
                )
            ],
            usage=usage,
        )


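# The output-spec max_output_tokens wins over params; Anthropic's Messages API
# requires max_tokens, so fall back to 1024 when neither is set.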
def _max_tokens(request: GenerateRequest) -> int:
    spec = request.output.text
    if spec and spec.max_output_tokens is not None:
        return max(1, int(spec.max_output_tokens))
    if request.params.max_output_tokens is not None:
        return max(1, int(request.params.max_output_tokens))
    return 1024


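# Builds the extended-thinking parameter; Anthropic requires the thinking
# budget to be strictly smaller than max_tokens, hence the guard below.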
def _thinking_param(
    request: GenerateRequest, *, max_tokens: int
) -> dict[str, Any] | None:
    reasoning = request.params.reasoning
    if reasoning is None:
        return None
    if reasoning.effort is None:
        return None
    effort = normalize_reasoning_effort(reasoning.effort)
    if effort == "none":
        return None
    budget = _EFFORT_TO_THINKING_BUDGET_TOKENS[effort]
    if budget >= max_tokens:
        raise invalid_request_error(
            f"Anthropic thinking budget_tokens must be < max_tokens ({budget} >= {max_tokens}); "
            "increase output.text.max_output_tokens or params.max_output_tokens"
        )
    return {"type": "enabled", "budget_tokens": budget}


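# System messages are lifted out of the message list into Anthropic's
# top-level "system" string; multiple system texts are joined by blank lines.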
def _extract_system_text(request: GenerateRequest) -> str | None:
    chunks: list[str] = []
    for m in request.input:
        if m.role != "system":
            continue
        for p in m.content:
            if p.type != "text":
                raise not_supported_error("Anthropic system messages only support text")
            t = p.require_text().strip()
            if t:
                chunks.append(t)
    if not chunks:
        return None
    return "\n\n".join(chunks)


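# Converts one SDK Part into an Anthropic content block. Images are always
# inlined as base64: URL sources are downloaded to a temp file first, and all
# inline payloads are capped at _INLINE_BYTES_LIMIT (20 MiB).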
def _part_to_block(
    part: Part, *, timeout_ms: int | None, proxy_url: str | None
) -> dict[str, Any]:
    if part.type == "text":
        return {"type": "text", "text": part.require_text()}
    if part.type == "tool_call":
        tool_call_id = part.meta.get("tool_call_id")
        name = part.meta.get("name")
        arguments = part.meta.get("arguments")
        if not isinstance(tool_call_id, str) or not tool_call_id:
            raise invalid_request_error(
                "tool_call.meta.tool_call_id required for Anthropic tool_use"
            )
        if not isinstance(name, str) or not name.strip():
            raise invalid_request_error(
                "tool_call.meta.name must be a non-empty string"
            )
        if not isinstance(arguments, dict):
            raise invalid_request_error(
                "Anthropic tool_call.meta.arguments must be an object"
            )
        return {
            "type": "tool_use",
            "id": tool_call_id,
            "name": name.strip(),
            "input": arguments,
        }
    if part.type == "tool_result":
        tool_call_id = part.meta.get("tool_call_id")
        name = part.meta.get("name")
        result = part.meta.get("result")
        is_error = part.meta.get("is_error")
        if not isinstance(tool_call_id, str) or not tool_call_id:
            raise invalid_request_error(
                "tool_result.meta.tool_call_id required for Anthropic tool_result"
            )
        if not isinstance(name, str) or not name.strip():
            raise invalid_request_error(
                "tool_result.meta.name must be a non-empty string"
            )
        if is_error is not None and not isinstance(is_error, bool):
            raise invalid_request_error("tool_result.meta.is_error must be a bool")
        out = (
            result
            if isinstance(result, str)
            else json.dumps(result, ensure_ascii=False, separators=(",", ":"))
        )
        block: dict[str, Any] = {
            "type": "tool_result",
            "tool_use_id": tool_call_id,
            "content": out,
        }
        if is_error is not None:
            block["is_error"] = is_error
        return block
    if part.type == "image":
        source = part.require_source()
        mime_type = part.mime_type
        if mime_type is None and isinstance(source, PartSourcePath):
            mime_type = detect_mime_type(source.path)
        if not mime_type or not mime_type.startswith("image/"):
            raise invalid_request_error("anthropic image requires image/* mime_type")

        if isinstance(source, PartSourceUrl):
            tmp = download_to_tempfile(
                url=source.url,
                timeout_ms=timeout_ms,
                max_bytes=_INLINE_BYTES_LIMIT,
                proxy_url=proxy_url,
            )
            try:
                data = file_to_bytes(tmp, _INLINE_BYTES_LIMIT)
            finally:
                try:
                    os.unlink(tmp)
                except OSError:
                    pass
            data_b64 = bytes_to_base64(data)
        elif isinstance(source, PartSourcePath):
            data = file_to_bytes(source.path, _INLINE_BYTES_LIMIT)
            data_b64 = bytes_to_base64(data)
        elif isinstance(source, PartSourceBytes) and source.encoding == "base64":
            raw_b64 = source.data
            if not isinstance(raw_b64, str) or not raw_b64:
                raise invalid_request_error("image base64 data must be non-empty")
            data_b64 = raw_b64
        else:
            assert isinstance(source, PartSourceBytes)
            raw = source.data
            if not isinstance(raw, bytes):
                raise invalid_request_error("image bytes data must be bytes")
            data = raw
            if len(data) > _INLINE_BYTES_LIMIT:
                raise not_supported_error(
                    f"inline bytes too large ({len(data)} > {_INLINE_BYTES_LIMIT})"
                )
            data_b64 = bytes_to_base64(data)

        return {
            "type": "image",
            "source": {"type": "base64", "media_type": mime_type, "data": data_b64},
        }
    raise not_supported_error(f"Anthropic does not support part type: {part.type}")
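
Usage sketch (not from the package's own docs; it assumes only the adapter
surface shown in this diff, that Capability exposes its fields as attributes,
and uses "claude-sonnet-4-20250514" as a placeholder model id):

    import os

    from nous.genai.providers.anthropic import AnthropicAdapter

    adapter = AnthropicAdapter(api_key=os.environ["ANTHROPIC_API_KEY"])

    # Remote model ids via GET /v1/models.
    print(adapter.list_models(timeout_ms=10_000))

    # Static capability flags derived locally from the model id.
    caps = adapter.capabilities("claude-sonnet-4-20250514")
    print(caps.input_modalities, caps.supports_tools)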