pydantic-ai-slim 0.0.31__tar.gz → 0.0.32__tar.gz
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Potentially problematic release: this version of pydantic-ai-slim has been flagged as possibly problematic.
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/PKG-INFO +3 -4
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_agent_graph.py +39 -38
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/agent.py +24 -21
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/__init__.py +4 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/anthropic.py +3 -1
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/gemini.py +1 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/instrumented.py +25 -27
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/result.py +19 -27
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pyproject.toml +3 -4
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/.gitignore +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/README.md +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_result.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/openai.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/vertexai.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.31
+Version: 0.0.32
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -28,12 +28,11 @@ Requires-Dist: eval-type-backport>=0.2.0
 Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
-Requires-Dist: logfire-api>=1.2.0
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.31
+Requires-Dist: pydantic-graph==0.0.32
 Requires-Dist: pydantic>=2.10
 Provides-Extra: anthropic
-Requires-Dist: anthropic>=0.
+Requires-Dist: anthropic>=0.49.0; extra == 'anthropic'
 Provides-Extra: cohere
 Requires-Dist: cohere>=5.13.11; extra == 'cohere'
 Provides-Extra: duckduckgo
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/_agent_graph.py

@@ -2,13 +2,14 @@ from __future__ import annotations as _annotations

 import asyncio
 import dataclasses
+import json
 from collections.abc import AsyncIterator, Iterator, Sequence
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import ContextVar
 from dataclasses import field
 from typing import Any, Generic, Literal, Union, cast

-import logfire_api
+from opentelemetry.trace import Span, Tracer
 from typing_extensions import TypeGuard, TypeVar, assert_never

 from pydantic_graph import BaseNode, Graph, GraphRunContext
@@ -42,17 +43,6 @@ __all__ = (
     'capture_run_messages',
 )

-_logfire = logfire_api.Logfire(otel_scope='pydantic-ai')
-
-# while waiting for https://github.com/pydantic/logfire/issues/745
-try:
-    import logfire._internal.stack_info
-except ImportError:
-    pass
-else:
-    from pathlib import Path
-
-    logfire._internal.stack_info.NON_USER_CODE_PREFIXES += (str(Path(__file__).parent.absolute()),)

 T = TypeVar('T')
 S = TypeVar('S')
@@ -105,7 +95,8 @@ class GraphAgentDeps(Generic[DepsT, ResultDataT]):

     function_tools: dict[str, Tool[DepsT]] = dataclasses.field(repr=False)

-    run_span:
+    run_span: Span
+    tracer: Tracer


 class AgentNode(BaseNode[GraphAgentState, GraphAgentDeps[DepsT, Any], result.FinalResult[NodeRunEndT]]):
@@ -330,7 +321,9 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
         ctx.state.run_step += 1

         model_settings = merge_model_settings(ctx.deps.model_settings, None)
-        with
+        with ctx.deps.tracer.start_as_current_span(
+            'preparing model request params', attributes=dict(run_step=ctx.state.run_step)
+        ):
             model_request_parameters = await _prepare_request_parameters(ctx)
         return model_settings, model_request_parameters
@@ -380,26 +373,12 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
     ) -> AsyncIterator[AsyncIterator[_messages.HandleResponseEvent]]:
         """Process the model response and yield events for the start and end of each function tool call."""
-
-
-            yield stream
+        stream = self._run_stream(ctx)
+        yield stream

-
-
-
-
-        # Set the next node based on the final state of the stream
-        next_node = self._next_node
-        if isinstance(next_node, End):
-            handle_span.set_attribute('result', next_node.data)
-            handle_span.message = 'handle model response -> final result'
-        elif tool_responses := self._tool_responses:
-            # TODO: We could drop `self._tool_responses` if we drop this set_attribute
-            # I'm thinking it might be better to just create a span for the handling of each tool
-            # than to set an attribute here.
-            handle_span.set_attribute('tool_responses', tool_responses)
-            tool_responses_str = ' '.join(r.part_kind for r in tool_responses)
-            handle_span.message = f'handle model response -> {tool_responses_str}'
+        # Run the stream to completion if it was not finished:
+        async for _event in stream:
+            pass

     async def _run_stream(
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
@@ -494,10 +473,29 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         if tool_responses:
             messages.append(_messages.ModelRequest(parts=tool_responses))

-        run_span.
-
-
-
+        run_span.set_attributes(
+            {
+                **usage.opentelemetry_attributes(),
+                'all_messages_events': json.dumps(
+                    [InstrumentedModel.event_to_dict(e) for e in InstrumentedModel.messages_to_otel_events(messages)]
+                ),
+                'final_result': final_result.data
+                if isinstance(final_result.data, str)
+                else json.dumps(InstrumentedModel.serialize_any(final_result.data)),
+            }
+        )
+        run_span.set_attributes(
+            {
+                'logfire.json_schema': json.dumps(
+                    {
+                        'type': 'object',
+                        'properties': {
+                            'all_messages_events': {'type': 'array'},
+                            'final_result': {'type': 'object'},
+                        },
+                    }
+                ),
+            }
         )

         # End the run with self.data
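Note on the attributes above: `logfire.json_schema` is a Logfire convention telling its UI which span attributes to parse as JSON rather than display as plain strings; on a generic OpenTelemetry backend it is just another string attribute. A standalone sketch of the resulting attribute shape (values shortened and hypothetical):

    import json

    attributes = {
        'all_messages_events': json.dumps([{'role': 'user', 'content': '...'}]),
        'final_result': json.dumps({'answer': 42}),
        'logfire.json_schema': json.dumps({
            'type': 'object',
            'properties': {'all_messages_events': {'type': 'array'}, 'final_result': {'type': 'object'}},
        }),
    }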
@@ -619,7 +617,10 @@ async def process_function_tools(

     # Run all tool tasks in parallel
     results_by_index: dict[int, _messages.ModelRequestPart] = {}
-
+    tool_names = [call.tool_name for _, call in calls_to_run]
+    with ctx.deps.tracer.start_as_current_span(
+        'running tools', attributes={'tools': tool_names, 'logfire.msg': f'running tools: {", ".join(tool_names)}'}
+    ):
         # TODO: Should we wrap each individual tool call in a dedicated span?
         tasks = [asyncio.create_task(tool.run(call, run_context), name=call.tool_name) for tool, call in calls_to_run]
         pending = tasks
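Similarly, `logfire.msg` is a Logfire convention for overriding a span's display message; other OpenTelemetry backends just see an ordinary string attribute. A minimal sketch of the same pattern outside pydantic-ai (hypothetical tool names):

    from opentelemetry.trace import get_tracer

    tracer = get_tracer('example')  # no-op unless a tracer provider is configured
    tool_names = ['get_weather', 'get_time']  # hypothetical
    with tracer.start_as_current_span(
        'running tools',
        attributes={'tools': tool_names, 'logfire.msg': f'running tools: {", ".join(tool_names)}'},
    ):
        ...  # run the tool tasks here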
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/agent.py

@@ -8,7 +8,7 @@ from copy import deepcopy
 from types import FrameType
 from typing import Any, Callable, Generic, cast, final, overload

-import logfire_api
+from opentelemetry.trace import NoOpTracer, use_span
 from typing_extensions import TypeGuard, TypeVar, deprecated

 from pydantic_graph import End, Graph, GraphRun, GraphRunContext
@@ -58,17 +58,6 @@ __all__ = (
     'UserPromptNode',
 )

-_logfire = logfire_api.Logfire(otel_scope='pydantic-ai')
-
-# while waiting for https://github.com/pydantic/logfire/issues/745
-try:
-    import logfire._internal.stack_info
-except ImportError:
-    pass
-else:
-    from pathlib import Path
-
-    logfire._internal.stack_info.NON_USER_CODE_PREFIXES += (str(Path(__file__).parent.absolute()),)

 T = TypeVar('T')
 S = TypeVar('S')
@@ -123,6 +112,9 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
     The type of the result data, used to validate the result data, defaults to `str`.
     """

+    instrument: bool
+    """Automatically instrument with OpenTelemetry. Will use Logfire if it's configured."""
+
     _deps_type: type[AgentDepsT] = dataclasses.field(repr=False)
     _result_tool_name: str = dataclasses.field(repr=False)
     _result_tool_description: str | None = dataclasses.field(repr=False)
@@ -155,6 +147,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
+        instrument: bool = False,
     ):
         """Create an agent.
@@ -184,6 +177,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
                 [override the model][pydantic_ai.Agent.override] for testing.
             end_strategy: Strategy for handling tool calls that are requested alongside a final result.
                 See [`EndStrategy`][pydantic_ai.agent.EndStrategy] for more information.
+            instrument: Automatically instrument with OpenTelemetry. Will use Logfire if it's configured.
         """
         if model is None or defer_model_check:
             self.model = model
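Taken together, the `instrument` hunks make tracing opt-in per agent. A minimal usage sketch (model name and prompt are illustrative; requires the corresponding provider extra):

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o', instrument=True)  # new in 0.0.32; defaults to False
    result = agent.run_sync('What is the capital of France?')
    print(result.data)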
@@ -194,6 +188,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         self.name = name
         self.model_settings = model_settings
         self.result_type = result_type
+        self.instrument = instrument

         self._deps_type = deps_type
@@ -396,6 +391,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         if infer_name and self.name is None:
             self._infer_name(inspect.currentframe())
         model_used = self._get_model(model)
+        del model

         deps = self._get_deps(deps)
         new_message_index = len(message_history) if message_history else 0
@@ -425,14 +421,20 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         model_settings = merge_model_settings(self.model_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()

-
-
-
-
-
-
-
+        if isinstance(model_used, InstrumentedModel):
+            tracer = model_used.tracer
+        else:
+            tracer = NoOpTracer()
+        agent_name = self.name or 'agent'
+        run_span = tracer.start_span(
+            'agent run',
+            attributes={
+                'model_name': model_used.model_name if model_used else 'no-model',
+                'agent_name': agent_name,
+                'logfire.msg': f'{agent_name} run',
+            },
         )
+
         graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunResultDataT](
             user_deps=deps,
             prompt=user_prompt,
@@ -447,6 +449,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
             result_validators=result_validators,
             function_tools=self._function_tools,
             run_span=run_span,
+            tracer=tracer,
         )
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
             user_prompt=user_prompt,
@@ -460,7 +463,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
             state=state,
             deps=graph_deps,
             infer_name=False,
-            span=run_span,
+            span=use_span(run_span, end_on_exit=True),
         ) as graph_run:
             yield AgentRun(graph_run)
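`use_span` is part of the OpenTelemetry API: it returns a context manager that makes an already-started span current and, with `end_on_exit=True`, ends it when the block exits, which is how the graph run now owns the span's lifetime. A standalone sketch of the pattern (not pydantic-ai code):

    from opentelemetry.trace import NoOpTracer, use_span

    tracer = NoOpTracer()  # stands in for any configured Tracer
    span = tracer.start_span('agent run')
    with use_span(span, end_on_exit=True):
        ...  # `span` is the current span here and is ended on exit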
@@ -1116,7 +1119,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         else:
             raise exceptions.UserError('`model` must be set either when creating the agent or when calling it.')

-        if not isinstance(model_, InstrumentedModel):
+        if self.instrument and not isinstance(model_, InstrumentedModel):
             model_ = InstrumentedModel(model_)

         return model_
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/__init__.py

@@ -28,9 +28,11 @@ if TYPE_CHECKING:


 KnownModelName = Literal[
+    'anthropic:claude-3-7-sonnet-latest',
     'anthropic:claude-3-5-haiku-latest',
     'anthropic:claude-3-5-sonnet-latest',
     'anthropic:claude-3-opus-latest',
+    'claude-3-7-sonnet-latest',
     'claude-3-5-haiku-latest',
     'claude-3-5-sonnet-latest',
     'claude-3-opus-latest',
@@ -56,6 +58,7 @@ KnownModelName = Literal[
     'google-gla:gemini-exp-1206',
     'google-gla:gemini-2.0-flash',
     'google-gla:gemini-2.0-flash-lite-preview-02-05',
+    'google-gla:gemini-2.0-pro-exp-02-05',
     'google-vertex:gemini-1.0-pro',
     'google-vertex:gemini-1.5-flash',
     'google-vertex:gemini-1.5-flash-8b',
@@ -65,6 +68,7 @@ KnownModelName = Literal[
     'google-vertex:gemini-exp-1206',
     'google-vertex:gemini-2.0-flash',
     'google-vertex:gemini-2.0-flash-lite-preview-02-05',
+    'google-vertex:gemini-2.0-pro-exp-02-05',
     'gpt-3.5-turbo',
     'gpt-3.5-turbo-0125',
     'gpt-3.5-turbo-0301',
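These are plain additions to the `KnownModelName` literal, so the new names can be passed wherever a model string is accepted. For example (a sketch; needs the anthropic extra and an API key):

    from pydantic_ai import Agent

    agent = Agent('anthropic:claude-3-7-sonnet-latest')  # newly recognised in 0.0.32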
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/anthropic.py

@@ -42,6 +42,7 @@ from . import (
 try:
     from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
     from anthropic.types import (
+        ContentBlock,
         ImageBlockParam,
         Message as AnthropicMessage,
         MessageParam,
@@ -69,6 +70,7 @@ except ImportError as _import_error:
     ) from _import_error

 LatestAnthropicModelNames = Literal[
+    'claude-3-7-sonnet-latest',
     'claude-3-5-haiku-latest',
     'claude-3-5-sonnet-latest',
     'claude-3-opus-latest',
@@ -423,7 +425,7 @@ class AnthropicStreamedResponse(StreamedResponse):
     _timestamp: datetime

     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
-        current_block:
+        current_block: ContentBlock | None = None
         current_json: str = ''

         async for event in self._response:
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/models/instrumented.py

@@ -6,7 +6,6 @@ from contextlib import asynccontextmanager, contextmanager
 from dataclasses import dataclass, field
 from typing import Any, Callable, Literal

-import logfire_api
 from opentelemetry._events import Event, EventLogger, EventLoggerProvider, get_event_logger_provider
 from opentelemetry.trace import Span, Tracer, TracerProvider, get_tracer_provider
 from opentelemetry.util.types import AttributeValue
@@ -59,27 +58,15 @@ class InstrumentedModel(WrapperModel):
         event_logger_provider: EventLoggerProvider | None = None,
         event_mode: Literal['attributes', 'logs'] = 'attributes',
     ):
+        from pydantic_ai import __version__
+
         super().__init__(wrapped)
         tracer_provider = tracer_provider or get_tracer_provider()
         event_logger_provider = event_logger_provider or get_event_logger_provider()
-        self.tracer = tracer_provider.get_tracer('pydantic-ai')
-        self.event_logger = event_logger_provider.get_event_logger('pydantic-ai')
+        self.tracer = tracer_provider.get_tracer('pydantic-ai', __version__)
+        self.event_logger = event_logger_provider.get_event_logger('pydantic-ai', __version__)
         self.event_mode = event_mode

-    @classmethod
-    def from_logfire(
-        cls,
-        wrapped: Model | KnownModelName,
-        logfire_instance: logfire_api.Logfire = logfire_api.DEFAULT_LOGFIRE_INSTANCE,
-        event_mode: Literal['attributes', 'logs'] = 'attributes',
-    ) -> InstrumentedModel:
-        if hasattr(logfire_instance.config, 'get_event_logger_provider'):
-            event_provider = logfire_instance.config.get_event_logger_provider()
-        else:
-            event_provider = None
-        tracer_provider = logfire_instance.config.get_tracer_provider()
-        return cls(wrapped, tracer_provider, event_provider, event_mode)
-
     async def request(
         self,
         messages: list[ModelMessage],
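With `from_logfire` gone, an `InstrumentedModel` is constructed directly from OpenTelemetry providers, both of which default to the globally registered ones. A minimal sketch, assuming the OpenTelemetry SDK is installed and that the constructor accepts a model-name string for `wrapped` (as the removed `from_logfire` signature suggests):

    from opentelemetry.sdk.trace import TracerProvider

    from pydantic_ai.models.instrumented import InstrumentedModel

    # tracer_provider/event_logger_provider are optional; global providers are used by default.
    model = InstrumentedModel('openai:gpt-4o', TracerProvider())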
@@ -199,19 +186,30 @@ class InstrumentedModel(WrapperModel):
     @staticmethod
     def messages_to_otel_events(messages: list[ModelMessage]) -> list[Event]:
         result: list[Event] = []
-        for message in messages:
+        for message_index, message in enumerate(messages):
+            message_events: list[Event] = []
             if isinstance(message, ModelRequest):
                 for part in message.parts:
                     if hasattr(part, 'otel_event'):
-
+                        message_events.append(part.otel_event())
             elif isinstance(message, ModelResponse):
-
+                message_events = message.otel_events()
+            for event in message_events:
+                event.attributes = {
+                    'gen_ai.message.index': message_index,
+                    **(event.attributes or {}),
+                }
+            result.extend(message_events)
         for event in result:
-            try:
-                event.body = ANY_ADAPTER.dump_python(event.body, mode='json')
-            except Exception:
-                try:
-                    event.body = str(event.body)
-                except Exception:
-                    event.body = 'Unable to serialize event body'
+            event.body = InstrumentedModel.serialize_any(event.body)
         return result
+
+    @staticmethod
+    def serialize_any(value: Any) -> str:
+        try:
+            return ANY_ADAPTER.dump_python(value, mode='json')
+        except Exception:
+            try:
+                return str(value)
+            except Exception as e:
+                return f'Unable to serialize: {e}'
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pydantic_ai/result.py

@@ -6,7 +6,6 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from typing import Generic, Union, cast

-import logfire_api
 from typing_extensions import TypeVar, assert_type

 from . import _result, _utils, exceptions, messages as _messages, models
@@ -49,8 +48,6 @@ A function that always takes and returns the same type of data (which is the res
 Usage `ResultValidatorFunc[AgentDepsT, T]`.
 """

-_logfire = logfire_api.Logfire(otel_scope='pydantic-ai')
-

 @dataclass
 class AgentStream(Generic[AgentDepsT, ResultDataT]):
@@ -302,17 +299,14 @@ class StreamedRunResult(Generic[AgentDepsT, ResultDataT]):
         if self._result_schema and not self._result_schema.allow_text_result:
             raise exceptions.UserError('stream_text() can only be used with text responses')

-
-
-
-
-
-        combined_validated_text =
-
-
-            yield combined_validated_text
-            lf_span.set_attribute('combined_text', combined_validated_text)
-        await self._marked_completed(self._stream_response.get())
+        if delta:
+            async for text in self._stream_response_text(delta=delta, debounce_by=debounce_by):
+                yield text
+        else:
+            async for text in self._stream_response_text(delta=delta, debounce_by=debounce_by):
+                combined_validated_text = await self._validate_text_result(text)
+                yield combined_validated_text
+        await self._marked_completed(self._stream_response.get())

     async def stream_structured(
         self, *, debounce_by: float | None = 0.1
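The rewrite drops the logfire span and simply branches on `delta`: raw deltas are yielded as-is, while full-text mode validates the accumulated text before each yield. A usage sketch (prompt is illustrative; `agent` as in the earlier example):

    async with agent.run_stream('Tell me a short story.') as result:
        async for text in result.stream_text(delta=False):
            print(text)  # progressively longer validated snapshots of the text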
@@ -327,22 +321,20 @@ class StreamedRunResult(Generic[AgentDepsT, ResultDataT]):
         Returns:
             An async iterable of the structured response message and whether that is the last message.
         """
-        with
-
-
-
-            if part.has_content():
-                yield msg, False
-                break
-
-        async for msg in self._stream_response_structured(debounce_by=debounce_by):
+        # if the message currently has any parts with content, yield before streaming
+        msg = self._stream_response.get()
+        for part in msg.parts:
+            if part.has_content():
                 yield msg, False
+                break

-
-            yield msg,
+        async for msg in self._stream_response_structured(debounce_by=debounce_by):
+            yield msg, False
+
+        msg = self._stream_response.get()
+        yield msg, True

-
-        await self._marked_completed(msg)
+        await self._marked_completed(msg)

     async def get_data(self) -> ResultDataT:
         """Stream the whole response, validate and return it."""
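As the docstring above says, `stream_structured` yields `(message, is_last)` pairs; after this change the final pair is always `(msg, True)` once the debounced stream finishes. A usage sketch (illustrative prompt):

    async with agent.run_stream('Extract the invoice details.') as result:
        async for message, is_last in result.stream_structured():
            ...  # inspect the partial response; `is_last` is True exactly once, at the end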
{pydantic_ai_slim-0.0.31 → pydantic_ai_slim-0.0.32}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "pydantic-ai-slim"
-version = "0.0.31"
+version = "0.0.32"
 description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
 authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
 license = "MIT"
@@ -35,9 +35,8 @@ dependencies = [
     "eval-type-backport>=0.2.0",
     "griffe>=1.3.2",
     "httpx>=0.27",
-    "logfire-api>=1.2.0",
     "pydantic>=2.10",
-    "pydantic-graph==0.0.31",
+    "pydantic-graph==0.0.32",
     "exceptiongroup; python_version < '3.11'",
     "opentelemetry-api>=1.28.0",
 ]
@@ -49,7 +48,7 @@ logfire = ["logfire>=2.3"]
 openai = ["openai>=1.65.1"]
 cohere = ["cohere>=5.13.11"]
 vertexai = ["google-auth>=2.36.0", "requests>=2.32.3"]
-anthropic = ["anthropic>=0.
+anthropic = ["anthropic>=0.49.0"]
 groq = ["groq>=0.12.0"]
 mistral = ["mistralai>=1.2.5"]
 # Tools