pydantic-ai-slim 0.7.4__tar.gz → 0.7.6__tar.gz
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/PKG-INFO +4 -4
- pydantic_ai_slim-0.7.6/pydantic_ai/_otel_messages.py +67 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/agent/__init__.py +11 -4
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/builtin_tools.py +1 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_model.py +4 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/messages.py +109 -18
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/__init__.py +27 -9
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/anthropic.py +20 -8
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/bedrock.py +16 -10
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/cohere.py +3 -1
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/function.py +5 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/gemini.py +8 -1
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/google.py +21 -4
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/groq.py +8 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/huggingface.py +8 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/instrumented.py +103 -42
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/mistral.py +8 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/openai.py +80 -36
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/test.py +7 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/__init__.py +1 -1
- pydantic_ai_slim-0.7.6/pydantic_ai/profiles/harmony.py +13 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/openai.py +6 -1
- pydantic_ai_slim-0.7.6/pydantic_ai/profiles/qwen.py +19 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/__init__.py +5 -1
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/anthropic.py +11 -8
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/azure.py +1 -1
- pydantic_ai_slim-0.7.6/pydantic_ai/providers/cerebras.py +96 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/cohere.py +2 -2
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/deepseek.py +4 -4
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/fireworks.py +3 -3
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/github.py +4 -4
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/grok.py +3 -3
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/groq.py +3 -3
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/heroku.py +3 -3
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/mistral.py +3 -3
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/moonshotai.py +3 -6
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/ollama.py +1 -1
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/openrouter.py +4 -4
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/together.py +3 -3
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/vercel.py +4 -4
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/retries.py +154 -42
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pyproject.toml +1 -1
- pydantic_ai_slim-0.7.4/pydantic_ai/profiles/qwen.py +0 -11
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/.gitignore +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/LICENSE +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/README.md +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_tool_manager.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/ag_ui.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/agent/abstract.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/agent/wrapper.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/huggingface.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/run.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/deferred.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.7.4
+Version: 0.7.6
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -35,7 +35,7 @@ Requires-Dist: genai-prices>=0.0.22
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.7.4
+Requires-Dist: pydantic-graph==0.7.6
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,9 +57,9 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.7.4; extra == 'evals'
+Requires-Dist: pydantic-evals==0.7.6; extra == 'evals'
 Provides-Extra: google
-Requires-Dist: google-genai>=1.…; extra == 'google'
+Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
 Requires-Dist: groq>=0.25.0; extra == 'groq'
 Provides-Extra: huggingface
pydantic_ai_slim-0.7.6/pydantic_ai/_otel_messages.py
ADDED
@@ -0,0 +1,67 @@
+"""Type definitions of OpenTelemetry GenAI spec message parts.
+
+Based on https://github.com/lmolkova/semantic-conventions/blob/eccd1f806e426a32c98271c3ce77585492d26de2/docs/gen-ai/non-normative/models.ipynb
+"""
+
+from __future__ import annotations
+
+from typing import Literal
+
+from pydantic import JsonValue
+from typing_extensions import NotRequired, TypeAlias, TypedDict
+
+
+class TextPart(TypedDict):
+    type: Literal['text']
+    content: NotRequired[str]
+
+
+class ToolCallPart(TypedDict):
+    type: Literal['tool_call']
+    id: str
+    name: str
+    arguments: NotRequired[JsonValue]
+
+
+class ToolCallResponsePart(TypedDict):
+    type: Literal['tool_call_response']
+    id: str
+    name: str
+    result: NotRequired[JsonValue]
+
+
+class MediaUrlPart(TypedDict):
+    type: Literal['image-url', 'audio-url', 'video-url', 'document-url']
+    url: NotRequired[str]
+
+
+class BinaryDataPart(TypedDict):
+    type: Literal['binary']
+    media_type: str
+    content: NotRequired[str]
+
+
+class ThinkingPart(TypedDict):
+    type: Literal['thinking']
+    content: NotRequired[str]
+
+
+MessagePart: TypeAlias = 'TextPart | ToolCallPart | ToolCallResponsePart | MediaUrlPart | BinaryDataPart | ThinkingPart'
+
+
+Role = Literal['system', 'user', 'assistant']
+
+
+class ChatMessage(TypedDict):
+    role: Role
+    parts: list[MessagePart]
+
+
+InputMessages: TypeAlias = list[ChatMessage]
+
+
+class OutputMessage(ChatMessage):
+    finish_reason: NotRequired[str]
+
+
+OutputMessages: TypeAlias = list[OutputMessage]
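Taken together, these TypedDicts model the OTel GenAI chat format end to end. A minimal sketch of composing them, assuming the module is imported from the installed package (message values are hypothetical):

    from pydantic_ai import _otel_messages as otel

    user_message: otel.ChatMessage = {
        'role': 'user',
        'parts': [{'type': 'text', 'content': 'What is the weather in Paris?'}],
    }
    output: otel.OutputMessage = {
        'role': 'assistant',
        'parts': [
            {'type': 'tool_call', 'id': 'call_1', 'name': 'get_weather', 'arguments': {'city': 'Paris'}},
        ],
        'finish_reason': 'stop',  # hypothetical finish reason string
    }
    history: otel.InputMessages = [user_message]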
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/agent/__init__.py
RENAMED
@@ -677,16 +677,23 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
     def _run_span_end_attributes(
         self, state: _agent_graph.GraphAgentState, usage: _usage.RunUsage, settings: InstrumentationSettings
     ):
+        if settings.version == 1:
+            attr_name = 'all_messages_events'
+            value = [
+                InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(state.message_history)
+            ]
+        else:
+            attr_name = 'pydantic_ai.all_messages'
+            value = settings.messages_to_otel_messages(state.message_history)
+
         return {
             **usage.opentelemetry_attributes(),
-            'all_messages_events': json.dumps(
-                [InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(state.message_history)]
-            ),
+            attr_name: json.dumps(value),
             'logfire.json_schema': json.dumps(
                 {
                     'type': 'object',
                     'properties': {
-                        'all_messages_events': {'type': 'array'},
+                        attr_name: {'type': 'array'},
                         'final_result': {'type': 'object'},
                     },
                 }
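The net effect: run spans keep the legacy `all_messages_events` attribute when the instrumentation settings are pinned to version 1, and otherwise emit the new `pydantic_ai.all_messages` attribute serialized from the `_otel_messages` format. A sketch of the resulting span attributes, with hypothetical message content:

    import json

    # Hypothetical attributes on the agent run span under the new (non-v1) format.
    span_attributes = {
        'pydantic_ai.all_messages': json.dumps(
            [
                {'role': 'user', 'parts': [{'type': 'text', 'content': 'hi'}]},
                {'role': 'assistant', 'parts': [{'type': 'text', 'content': 'hello'}]},
            ]
        ),
        'logfire.json_schema': json.dumps(
            {
                'type': 'object',
                'properties': {'pydantic_ai.all_messages': {'type': 'array'}, 'final_result': {'type': 'object'}},
            }
        ),
    }
    print(json.loads(span_attributes['pydantic_ai.all_messages'])[0]['role'])  # user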
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/durable_exec/temporal/_model.py
RENAMED
@@ -55,6 +55,10 @@ class TemporalStreamedResponse(StreamedResponse):
     def model_name(self) -> str:
         return self.response.model_name or ''  # pragma: no cover

+    @property
+    def provider_name(self) -> str:
+        return self.response.provider_name or ''  # pragma: no cover
+
     @property
     def timestamp(self) -> datetime:
         return self.response.timestamp  # pragma: no cover
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/messages.py
RENAMED
@@ -10,10 +10,11 @@ from typing import TYPE_CHECKING, Annotated, Any, Literal, Union, cast, overload

 import pydantic
 import pydantic_core
+from genai_prices import calc_price, types as genai_types
 from opentelemetry._events import Event  # pyright: ignore[reportPrivateImportUsage]
 from typing_extensions import TypeAlias, deprecated

-from . import _utils
+from . import _otel_messages, _utils
 from ._utils import (
     generate_tool_call_id as _generate_tool_call_id,
     now_utc as _now_utc,
@@ -82,6 +83,9 @@ class SystemPromptPart:
             body={'role': 'system', **({'content': self.content} if settings.include_content else {})},
         )

+    def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+        return [_otel_messages.TextPart(type='text', **{'content': self.content} if settings.include_content else {})]
+
     __repr__ = _utils.dataclasses_no_defaults_repr

@@ -504,25 +508,41 @@ class UserPromptPart:
     """Part type identifier, this is available on all parts as a discriminator."""

     def otel_event(self, settings: InstrumentationSettings) -> Event:
-        content: str | list[dict[str, Any] | str] | dict[str, Any]
-        if isinstance(self.content, str):
-            content = self.content if settings.include_content else {'kind': 'text'}
-        else:
-            content = []
-            for part in self.content:
-                if isinstance(part, str):
-                    content.append(part if settings.include_content else {'kind': 'text'})
-                elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
-                    content.append({'kind': part.kind, **({'url': part.url} if settings.include_content else {})})
-                elif isinstance(part, BinaryContent):
-                    converted_part = {'kind': part.kind, 'media_type': part.media_type}
-                    if settings.include_content and settings.include_binary_content:
-                        converted_part['binary_content'] = base64.b64encode(part.data).decode()
-                    content.append(converted_part)
-                else:
-                    content.append({'kind': part.kind})  # pragma: no cover
+        content = [{'kind': part.pop('type'), **part} for part in self.otel_message_parts(settings)]
+        for part in content:
+            if part['kind'] == 'binary' and 'content' in part:
+                part['binary_content'] = part.pop('content')
+        content = [
+            part['content'] if part == {'kind': 'text', 'content': part.get('content')} else part for part in content
+        ]
+        if content in ([{'kind': 'text'}], [self.content]):
+            content = content[0]
         return Event('gen_ai.user.message', body={'content': content, 'role': 'user'})

+    def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+        parts: list[_otel_messages.MessagePart] = []
+        content: Sequence[UserContent] = [self.content] if isinstance(self.content, str) else self.content
+        for part in content:
+            if isinstance(part, str):
+                parts.append(
+                    _otel_messages.TextPart(type='text', **({'content': part} if settings.include_content else {}))
+                )
+            elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
+                parts.append(
+                    _otel_messages.MediaUrlPart(
+                        type=part.kind,
+                        **{'url': part.url} if settings.include_content else {},
+                    )
+                )
+            elif isinstance(part, BinaryContent):
+                converted_part = _otel_messages.BinaryDataPart(type='binary', media_type=part.media_type)
+                if settings.include_content and settings.include_binary_content:
+                    converted_part['content'] = base64.b64encode(part.data).decode()
+                parts.append(converted_part)
+            else:
+                parts.append({'type': part.kind})  # pragma: no cover
+        return parts
+
     __repr__ = _utils.dataclasses_no_defaults_repr

@@ -576,6 +596,18 @@ class BaseToolReturnPart:
             },
         )

+    def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+        from .models.instrumented import InstrumentedModel
+
+        return [
+            _otel_messages.ToolCallResponsePart(
+                type='tool_call_response',
+                id=self.tool_call_id,
+                name=self.tool_name,
+                **({'result': InstrumentedModel.serialize_any(self.content)} if settings.include_content else {}),
+            )
+        ]
+
     def has_content(self) -> bool:
         """Return `True` if the tool return has content."""
         return self.content is not None  # pragma: no cover
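With `include_content` enabled, a tool return therefore maps to a single `tool_call_response` part. A hypothetical example of the returned list:

    # Hypothetical value of BaseToolReturnPart.otel_message_parts(settings)
    # when settings.include_content is True:
    parts = [
        {
            'type': 'tool_call_response',
            'id': 'call_1',
            'name': 'get_weather',
            'result': {'temperature_c': 21},
        }
    ]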
@@ -669,6 +701,19 @@ class RetryPromptPart:
             },
         )

+    def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+        if self.tool_name is None:
+            return [_otel_messages.TextPart(type='text', content=self.model_response())]
+        else:
+            return [
+                _otel_messages.ToolCallResponsePart(
+                    type='tool_call_response',
+                    id=self.tool_call_id,
+                    name=self.tool_name,
+                    **({'result': self.model_response()} if settings.include_content else {}),
+                )
+            ]
+
     __repr__ = _utils.dataclasses_no_defaults_repr

@@ -848,6 +893,9 @@ class ModelResponse:
     kind: Literal['response'] = 'response'
     """Message type identifier, this is available on all parts as a discriminator."""

+    provider_name: str | None = None
+    """The name of the LLM provider that generated the response."""
+
     provider_details: dict[str, Any] | None = field(default=None)
     """Additional provider-specific details in a serializable format.
@@ -858,6 +906,19 @@ class ModelResponse:
     provider_request_id: str | None = None
     """request ID as specified by the model provider. This can be used to track the specific request to the model."""

+    def price(self) -> genai_types.PriceCalculation:
+        """Calculate the price of the usage.
+
+        Uses [`genai-prices`](https://github.com/pydantic/genai-prices).
+        """
+        assert self.model_name, 'Model name is required to calculate price'
+        return calc_price(
+            self.usage,
+            self.model_name,
+            provider_id=self.provider_name,
+            genai_request_timestamp=self.timestamp,
+        )
+
     def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
         """Return OpenTelemetry events for the response."""
         result: list[Event] = []
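The new `ModelResponse.price()` helper needs `model_name` (asserted) plus the `provider_name` and `timestamp` that the provider hunks below now populate. A minimal usage sketch, assuming a configured OpenAI key and assuming genai-prices' `PriceCalculation` exposes a `total_price` field:

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o')
    result = agent.run_sync('Tell me a joke.')
    response = result.all_messages()[-1]  # the final ModelResponse of the run
    calculation = response.price()
    print(calculation.total_price)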
@@ -894,6 +955,36 @@ class ModelResponse:

         return result

+    def otel_message_parts(self, settings: InstrumentationSettings) -> list[_otel_messages.MessagePart]:
+        parts: list[_otel_messages.MessagePart] = []
+        for part in self.parts:
+            if isinstance(part, TextPart):
+                parts.append(
+                    _otel_messages.TextPart(
+                        type='text',
+                        **({'content': part.content} if settings.include_content else {}),
+                    )
+                )
+            elif isinstance(part, ThinkingPart):
+                parts.append(
+                    _otel_messages.ThinkingPart(
+                        type='thinking',
+                        **({'content': part.content} if settings.include_content else {}),
+                    )
+                )
+            elif isinstance(part, ToolCallPart):
+                call_part = _otel_messages.ToolCallPart(type='tool_call', id=part.tool_call_id, name=part.tool_name)
+                if settings.include_content and part.args is not None:
+                    from .models.instrumented import InstrumentedModel
+
+                    if isinstance(part.args, str):
+                        call_part['arguments'] = part.args
+                    else:
+                        call_part['arguments'] = {k: InstrumentedModel.serialize_any(v) for k, v in part.args.items()}
+
+                parts.append(call_part)
+        return parts
+
     @property
     @deprecated('`vendor_details` is deprecated, use `provider_details` instead')
     def vendor_details(self) -> dict[str, Any] | None:
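For a response that thinks, calls a tool, and then answers, `otel_message_parts` would yield something like the following (hypothetical content, `include_content=True`):

    # Hypothetical value of ModelResponse.otel_message_parts(settings):
    parts = [
        {'type': 'thinking', 'content': 'The user wants the current weather...'},
        {'type': 'tool_call', 'id': 'call_1', 'name': 'get_weather', 'arguments': {'city': 'Paris'}},
        {'type': 'text', 'content': 'It is 21°C in Paris.'},
    ]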
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/__init__.py
RENAMED
@@ -111,6 +111,15 @@ KnownModelName = TypeAliasType(
         'bedrock:mistral.mixtral-8x7b-instruct-v0:1',
         'bedrock:mistral.mistral-large-2402-v1:0',
         'bedrock:mistral.mistral-large-2407-v1:0',
+        'cerebras:gpt-oss-120b',
+        'cerebras:llama3.1-8b',
+        'cerebras:llama-3.3-70b',
+        'cerebras:llama-4-scout-17b-16e-instruct',
+        'cerebras:llama-4-maverick-17b-128e-instruct',
+        'cerebras:qwen-3-235b-a22b-instruct-2507',
+        'cerebras:qwen-3-32b',
+        'cerebras:qwen-3-coder-480b',
+        'cerebras:qwen-3-235b-a22b-thinking-2507',
         'claude-3-5-haiku-20241022',
         'claude-3-5-haiku-latest',
         'claude-3-5-sonnet-20240620',
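The new `cerebras` provider (see the new `providers/cerebras.py` in the summary above) slots into the usual model-string convention. An illustrative sketch, assuming the provider reads a `CEREBRAS_API_KEY` environment variable like its siblings:

    from pydantic_ai import Agent

    # Assumes CEREBRAS_API_KEY is set; the key name is inferred from the
    # provider convention and may differ.
    agent = Agent('cerebras:llama-3.3-70b')
    result = agent.run_sync('Summarize the CAP theorem in one sentence.')
    print(result.output)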
@@ -598,6 +607,7 @@ class StreamedResponse(ABC):
             model_name=self.model_name,
             timestamp=self.timestamp,
             usage=self.usage(),
+            provider_name=self.provider_name,
         )

     def usage(self) -> RequestUsage:
@@ -610,6 +620,12 @@ class StreamedResponse(ABC):
         """Get the model name of the response."""
         raise NotImplementedError()

+    @property
+    @abstractmethod
+    def provider_name(self) -> str | None:
+        """Get the provider name."""
+        raise NotImplementedError()
+
     @property
     @abstractmethod
     def timestamp(self) -> datetime:
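The same plumbing repeats across the provider hunks below: each concrete model stamps `self._provider.name` onto the `ModelResponse` and `StreamedResponse` it builds, and the stream exposes it through this new abstract property. A toy sketch of the pattern (hypothetical class, not the library's):

    from dataclasses import dataclass

    @dataclass
    class SketchStreamedResponse:
        _provider_name: str  # stored by the model when constructing the stream

        @property
        def provider_name(self) -> str:
            """Get the provider name."""
            return self._provider_name

    stream = SketchStreamedResponse(_provider_name='bedrock')
    print(stream.provider_name)  # bedrock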
@@ -688,21 +704,23 @@ def infer_model(model: Model | KnownModelName | str) -> Model:  # noqa: C901

         return CohereModel(model_name, provider=provider)
     elif provider in (
-        'openai',
-        'deepseek',
         'azure',
-        'openrouter',
-        'vercel',
+        'deepseek',
+        'cerebras',
+        'fireworks',
+        'github',
         'grok',
+        'heroku',
         'moonshotai',
-        'fireworks',
+        'openai',
+        'openai-chat',
+        'openrouter',
         'together',
-        'heroku',
-        'github',
+        'vercel',
     ):
-        from .openai import OpenAIModel
+        from .openai import OpenAIChatModel

-        return OpenAIModel(model_name, provider=provider)
+        return OpenAIChatModel(model_name, provider=provider)
     elif provider == 'openai-responses':
         from .openai import OpenAIResponsesModel
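Since `cerebras` joined the OpenAI-compatible provider list, `infer_model` resolves its model strings to `OpenAIChatModel`. An illustrative check:

    from pydantic_ai.models import infer_model

    model = infer_model('cerebras:llama-3.3-70b')
    print(type(model).__name__)  # OpenAIChatModel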
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/anthropic.py
RENAMED
@@ -37,12 +37,13 @@ from ..messages import (
 )
 from ..profiles import ModelProfileSpec
 from ..providers import Provider, infer_provider
+from ..providers.anthropic import AsyncAnthropicClient
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests, download_item, get_user_agent

 try:
-    from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
+    from anthropic import NOT_GIVEN, APIStatusError, AsyncStream
     from anthropic.types.beta import (
         BetaBase64PDFBlockParam,
         BetaBase64PDFSourceParam,
@@ -134,16 +135,16 @@ class AnthropicModel(Model):
     Apart from `__init__`, all methods are private or match those of the base class.
     """

-    client: AsyncAnthropic = field(repr=False)
+    client: AsyncAnthropicClient = field(repr=False)

     _model_name: AnthropicModelName = field(repr=False)
-    _provider: Provider[AsyncAnthropic] = field(repr=False)
+    _provider: Provider[AsyncAnthropicClient] = field(repr=False)

     def __init__(
         self,
         model_name: AnthropicModelName,
         *,
-        provider: Literal['anthropic'] | Provider[AsyncAnthropic] = 'anthropic',
+        provider: Literal['anthropic'] | Provider[AsyncAnthropicClient] = 'anthropic',
         profile: ModelProfileSpec | None = None,
         settings: ModelSettings | None = None,
     ):
@@ -153,7 +154,7 @@ class AnthropicModel(Model):
             model_name: The name of the Anthropic model to use. List of model names available
                 [here](https://docs.anthropic.com/en/docs/about-claude/models).
             provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
-                instance of `Provider[AsyncAnthropic]`. If not provided, the other parameters will be used.
+                instance of `Provider[AsyncAnthropicClient]`. If not provided, the other parameters will be used.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
             settings: Default model settings for this model instance.
         """
@@ -326,7 +327,11 @@ class AnthropicModel(Model):
             )

         return ModelResponse(
-            items, usage=_map_usage(response), model_name=response.model, provider_request_id=response.id
+            items,
+            usage=_map_usage(response),
+            model_name=response.model,
+            provider_request_id=response.id,
+            provider_name=self._provider.name,
         )

     async def _process_streamed_response(
@@ -344,6 +349,7 @@ class AnthropicModel(Model):
             _model_name=self._model_name,
             _response=peekable_response,
             _timestamp=timestamp,
+            _provider_name=self._provider.name,
         )

     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[BetaToolParam]:
@@ -469,9 +475,9 @@ class AnthropicModel(Model):
                 anthropic_messages.append(BetaMessageParam(role='assistant', content=assistant_content_params))
             else:
                 assert_never(m)
-        system_prompt = '\n\n'.join(system_prompt_parts)
         if instructions := self._get_instructions(messages):
-            system_prompt = f'{instructions}\n\n{system_prompt}'
+            system_prompt_parts.insert(0, instructions)
+        system_prompt = '\n\n'.join(system_prompt_parts)
         return system_prompt, anthropic_messages

     @staticmethod
@@ -574,6 +580,7 @@ class AnthropicStreamedResponse(StreamedResponse):
     _model_name: AnthropicModelName
     _response: AsyncIterable[BetaRawMessageStreamEvent]
     _timestamp: datetime
+    _provider_name: str

     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
         current_block: BetaContentBlock | None = None
@@ -655,6 +662,11 @@ class AnthropicStreamedResponse(StreamedResponse):
         """Get the model name of the response."""
         return self._model_name

+    @property
+    def provider_name(self) -> str:
+        """Get the provider name."""
+        return self._provider_name
+
     @property
     def timestamp(self) -> datetime:
         """Get the timestamp of the response."""
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/bedrock.py
RENAMED
@@ -240,10 +240,7 @@ class BedrockConverseModel(Model):

     @staticmethod
     def _map_tool_definition(f: ToolDefinition) -> ToolTypeDef:
-        tool_spec: ToolSpecificationTypeDef = {
-            'name': f.name,
-            'inputSchema': {'json': f.parameters_json_schema},
-        }
+        tool_spec: ToolSpecificationTypeDef = {'name': f.name, 'inputSchema': {'json': f.parameters_json_schema}}

         if f.description:  # pragma: no branch
             tool_spec['description'] = f.description
@@ -275,6 +272,7 @@ class BedrockConverseModel(Model):
             model_request_parameters=model_request_parameters,
             _model_name=self.model_name,
             _event_stream=response,
+            _provider_name=self._provider.name,
         )

     async def _process_response(self, response: ConverseResponseTypeDef) -> ModelResponse:
@@ -304,7 +302,9 @@ class BedrockConverseModel(Model):
             output_tokens=response['usage']['outputTokens'],
         )
         vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
-        return ModelResponse(items, usage=u, model_name=self.model_name, provider_request_id=vendor_id)
+        return ModelResponse(
+            items, usage=u, model_name=self.model_name, provider_request_id=vendor_id, provider_name=self._provider.name
+        )

     @overload
     async def _messages_create(
@@ -423,7 +423,7 @@ class BedrockConverseModel(Model):
         for message in messages:
             if isinstance(message, ModelRequest):
                 for part in message.parts:
-                    if isinstance(part, SystemPromptPart):
+                    if isinstance(part, SystemPromptPart) and part.content:
                         system_prompt.append({'text': part.content})
                     elif isinstance(part, UserPromptPart):
                         bedrock_messages.extend(await self._map_user_prompt(part, document_count))
@@ -594,6 +594,7 @@ class BedrockStreamedResponse(StreamedResponse):

     _model_name: BedrockModelName
     _event_stream: EventStream[ConverseStreamOutputTypeDef]
+    _provider_name: str
     _timestamp: datetime = field(default_factory=_utils.now_utc)

     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
@@ -660,15 +661,20 @@ class BedrockStreamedResponse(StreamedResponse):
             if maybe_event:  # pragma: no branch
                 yield maybe_event

-    @property
-    def timestamp(self) -> datetime:
-        return self._timestamp
-
     @property
     def model_name(self) -> str:
         """Get the model name of the response."""
         return self._model_name

+    @property
+    def provider_name(self) -> str:
+        """Get the provider name."""
+        return self._provider_name
+
+    @property
+    def timestamp(self) -> datetime:
+        return self._timestamp
+
     def _map_usage(self, metadata: ConverseStreamMetadataEventTypeDef) -> usage.RequestUsage:
         return usage.RequestUsage(
             input_tokens=metadata['usage']['inputTokens'],
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/cohere.py
RENAMED
@@ -205,7 +205,9 @@ class CohereModel(Model):
                     tool_call_id=c.id or _generate_tool_call_id(),
                 )
             )
-        return ModelResponse(parts=parts, usage=_map_usage(response), model_name=self._model_name)
+        return ModelResponse(
+            parts=parts, usage=_map_usage(response), model_name=self._model_name, provider_name=self._provider.name
+        )

     def _map_messages(self, messages: list[ModelMessage]) -> list[ChatMessageV2]:
         """Just maps a `pydantic_ai.Message` to a `cohere.ChatMessageV2`."""
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/function.py
RENAMED
@@ -304,6 +304,11 @@ class FunctionStreamedResponse(StreamedResponse):
         """Get the model name of the response."""
         return self._model_name

+    @property
+    def provider_name(self) -> None:
+        """Get the provider name."""
+        return None
+
     @property
     def timestamp(self) -> datetime:
         """Get the timestamp of the response."""
{pydantic_ai_slim-0.7.4 → pydantic_ai_slim-0.7.6}/pydantic_ai/models/gemini.py
RENAMED
@@ -305,6 +305,7 @@ class GeminiModel(Model):
             _model_name=self._model_name,
             _content=content,
             _stream=aiter_bytes,
+            _provider_name=self._provider.name,
         )

     async def _message_to_gemini_content(
@@ -425,6 +426,7 @@ class GeminiStreamedResponse(StreamedResponse):
     _model_name: GeminiModelName
     _content: bytearray
     _stream: AsyncIterator[bytes]
+    _provider_name: str
     _timestamp: datetime = field(default_factory=_utils.now_utc, init=False)

     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
@@ -495,6 +497,11 @@ class GeminiStreamedResponse(StreamedResponse):
         """Get the model name of the response."""
         return self._model_name

+    @property
+    def provider_name(self) -> str:
+        """Get the provider name."""
+        return self._provider_name
+
     @property
     def timestamp(self) -> datetime:
         """Get the timestamp of the response."""
@@ -883,7 +890,7 @@ def _metadata_as_usage(response: _GeminiResponse) -> usage.RequestUsage:

     return usage.RequestUsage(
         input_tokens=metadata.get('prompt_token_count', 0),
-        output_tokens=metadata.get('candidates_token_count', 0),
+        output_tokens=metadata.get('candidates_token_count', 0) + thoughts_token_count,
         cache_read_tokens=cached_content_token_count,
         input_audio_tokens=input_audio_tokens,
         output_audio_tokens=output_audio_tokens,