pydantic-ai-slim 0.8.0__tar.gz → 0.8.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic. Click here for more details.

Files changed (119)
  1. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/agent/__init__.py +14 -8
  3. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/__init__.py +67 -16
  4. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_logfire.py +5 -2
  5. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/messages.py +8 -3
  6. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/__init__.py +14 -3
  7. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/anthropic.py +1 -1
  8. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/bedrock.py +6 -2
  9. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/gemini.py +1 -1
  10. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/google.py +1 -1
  11. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/groq.py +1 -1
  12. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/huggingface.py +1 -1
  13. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/instrumented.py +14 -5
  14. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/mistral.py +2 -2
  15. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/openai.py +2 -2
  16. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/result.py +31 -13
  17. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pyproject.toml +1 -1
  18. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/.gitignore +0 -0
  19. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/LICENSE +0 -0
  20. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/README.md +0 -0
  21. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/__init__.py +0 -0
  22. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/__main__.py +0 -0
  23. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_a2a.py +0 -0
  24. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_agent_graph.py +0 -0
  25. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_cli.py +0 -0
  26. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_function_schema.py +0 -0
  27. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_griffe.py +0 -0
  28. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_mcp.py +0 -0
  29. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_otel_messages.py +0 -0
  30. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_output.py +0 -0
  31. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_parts_manager.py +0 -0
  32. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_run_context.py +0 -0
  33. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_system_prompt.py +0 -0
  34. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_thinking_part.py +0 -0
  35. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_tool_manager.py +0 -0
  36. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/_utils.py +0 -0
  37. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/ag_ui.py +0 -0
  38. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/agent/abstract.py +0 -0
  39. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/agent/wrapper.py +0 -0
  40. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/builtin_tools.py +0 -0
  41. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/common_tools/__init__.py +0 -0
  42. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  43. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/common_tools/tavily.py +0 -0
  44. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/direct.py +0 -0
  45. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/__init__.py +0 -0
  46. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  47. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  48. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  49. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  50. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  51. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  52. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/exceptions.py +0 -0
  53. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/ext/__init__.py +0 -0
  54. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/ext/aci.py +0 -0
  55. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/ext/langchain.py +0 -0
  56. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/format_prompt.py +0 -0
  57. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/mcp.py +0 -0
  58. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/cohere.py +0 -0
  59. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/fallback.py +0 -0
  60. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/function.py +0 -0
  61. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/mcp_sampling.py +0 -0
  62. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/test.py +0 -0
  63. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/models/wrapper.py +0 -0
  64. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/output.py +0 -0
  65. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/__init__.py +0 -0
  66. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/_json_schema.py +0 -0
  67. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/amazon.py +0 -0
  68. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/anthropic.py +0 -0
  69. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/cohere.py +0 -0
  70. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/deepseek.py +0 -0
  71. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/google.py +0 -0
  72. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/grok.py +0 -0
  73. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/groq.py +0 -0
  74. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/harmony.py +0 -0
  75. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/meta.py +0 -0
  76. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/mistral.py +0 -0
  77. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/moonshotai.py +0 -0
  78. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/openai.py +0 -0
  79. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/profiles/qwen.py +0 -0
  80. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/__init__.py +0 -0
  81. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/anthropic.py +0 -0
  82. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/azure.py +0 -0
  83. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/bedrock.py +0 -0
  84. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/cerebras.py +0 -0
  85. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/cohere.py +0 -0
  86. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/deepseek.py +0 -0
  87. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/fireworks.py +0 -0
  88. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/github.py +0 -0
  89. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/google.py +0 -0
  90. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/google_gla.py +0 -0
  91. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/google_vertex.py +0 -0
  92. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/grok.py +0 -0
  93. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/groq.py +0 -0
  94. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/heroku.py +0 -0
  95. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/huggingface.py +0 -0
  96. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/mistral.py +0 -0
  97. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/moonshotai.py +0 -0
  98. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/ollama.py +0 -0
  99. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/openai.py +0 -0
  100. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/openrouter.py +0 -0
  101. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/together.py +0 -0
  102. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/providers/vercel.py +0 -0
  103. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/py.typed +0 -0
  104. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/retries.py +0 -0
  105. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/run.py +0 -0
  106. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/settings.py +0 -0
  107. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/tools.py +0 -0
  108. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/__init__.py +0 -0
  109. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/_dynamic.py +0 -0
  110. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/abstract.py +0 -0
  111. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/combined.py +0 -0
  112. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/deferred.py +0 -0
  113. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/filtered.py +0 -0
  114. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/function.py +0 -0
  115. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/prefixed.py +0 -0
  116. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/prepared.py +0 -0
  117. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/renamed.py +0 -0
  118. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/toolsets/wrapper.py +0 -0
  119. {pydantic_ai_slim-0.8.0 → pydantic_ai_slim-0.8.1}/pydantic_ai/usage.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.8.0
3
+ Version: 0.8.1
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
6
6
  Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -35,7 +35,7 @@ Requires-Dist: genai-prices>=0.0.22
35
35
  Requires-Dist: griffe>=1.3.2
36
36
  Requires-Dist: httpx>=0.27
37
37
  Requires-Dist: opentelemetry-api>=1.28.0
38
- Requires-Dist: pydantic-graph==0.8.0
38
+ Requires-Dist: pydantic-graph==0.8.1
39
39
  Requires-Dist: pydantic>=2.10
40
40
  Requires-Dist: typing-inspection>=0.4.0
41
41
  Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
57
57
  Provides-Extra: duckduckgo
58
58
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
59
59
  Provides-Extra: evals
60
- Requires-Dist: pydantic-evals==0.8.0; extra == 'evals'
60
+ Requires-Dist: pydantic-evals==0.8.1; extra == 'evals'
61
61
  Provides-Extra: google
62
62
  Requires-Dist: google-genai>=1.31.0; extra == 'google'
63
63
  Provides-Extra: groq
@@ -77,7 +77,7 @@ Requires-Dist: tenacity>=8.2.3; extra == 'retries'
77
77
  Provides-Extra: tavily
78
78
  Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
79
79
  Provides-Extra: temporal
80
- Requires-Dist: temporalio==1.15.0; extra == 'temporal'
80
+ Requires-Dist: temporalio==1.16.0; extra == 'temporal'
81
81
  Provides-Extra: vertexai
82
82
  Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
83
83
  Requires-Dist: requests>=2.32.2; extra == 'vertexai'
@@ -678,22 +678,28 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
678
678
  self, state: _agent_graph.GraphAgentState, usage: _usage.RunUsage, settings: InstrumentationSettings
679
679
  ):
680
680
  if settings.version == 1:
681
- attr_name = 'all_messages_events'
682
- value = [
683
- InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(state.message_history)
684
- ]
681
+ attrs = {
682
+ 'all_messages_events': json.dumps(
683
+ [
684
+ InstrumentedModel.event_to_dict(e)
685
+ for e in settings.messages_to_otel_events(state.message_history)
686
+ ]
687
+ )
688
+ }
685
689
  else:
686
- attr_name = 'pydantic_ai.all_messages'
687
- value = settings.messages_to_otel_messages(state.message_history)
690
+ attrs = {
691
+ 'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(state.message_history)),
692
+ **settings.system_instructions_attributes(self._instructions),
693
+ }
688
694
 
689
695
  return {
690
696
  **usage.opentelemetry_attributes(),
691
- attr_name: json.dumps(value),
697
+ **attrs,
692
698
  'logfire.json_schema': json.dumps(
693
699
  {
694
700
  'type': 'object',
695
701
  'properties': {
696
- attr_name: {'type': 'array'},
702
+ **{attr: {'type': 'array'} for attr in attrs.keys()},
697
703
  'final_result': {'type': 'object'},
698
704
  },
699
705
  }
@@ -1,15 +1,24 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  import warnings
4
- from collections.abc import Sequence
4
+ from collections.abc import AsyncIterator, Sequence
5
+ from contextlib import AbstractAsyncContextManager
5
6
  from dataclasses import replace
6
7
  from typing import Any, Callable
7
8
 
8
9
  from pydantic.errors import PydanticUserError
9
- from temporalio.client import ClientConfig, Plugin as ClientPlugin
10
+ from temporalio.client import ClientConfig, Plugin as ClientPlugin, WorkflowHistory
10
11
  from temporalio.contrib.pydantic import PydanticPayloadConverter, pydantic_data_converter
11
- from temporalio.converter import DefaultPayloadConverter
12
- from temporalio.worker import Plugin as WorkerPlugin, WorkerConfig
12
+ from temporalio.converter import DataConverter, DefaultPayloadConverter
13
+ from temporalio.service import ConnectConfig, ServiceClient
14
+ from temporalio.worker import (
15
+ Plugin as WorkerPlugin,
16
+ Replayer,
17
+ ReplayerConfig,
18
+ Worker,
19
+ WorkerConfig,
20
+ WorkflowReplayResult,
21
+ )
13
22
  from temporalio.worker.workflow_sandbox import SandboxedWorkflowRunner
14
23
 
15
24
  from ...exceptions import UserError
@@ -31,17 +40,15 @@ __all__ = [
31
40
  class PydanticAIPlugin(ClientPlugin, WorkerPlugin):
32
41
  """Temporal client and worker plugin for Pydantic AI."""
33
42
 
34
- def configure_client(self, config: ClientConfig) -> ClientConfig:
35
- if (data_converter := config.get('data_converter')) and data_converter.payload_converter_class not in (
36
- DefaultPayloadConverter,
37
- PydanticPayloadConverter,
38
- ):
39
- warnings.warn( # pragma: no cover
40
- 'A non-default Temporal data converter was used which has been replaced with the Pydantic data converter.'
41
- )
43
+ def init_client_plugin(self, next: ClientPlugin) -> None:
44
+ self.next_client_plugin = next
42
45
 
43
- config['data_converter'] = pydantic_data_converter
44
- return super().configure_client(config)
46
+ def init_worker_plugin(self, next: WorkerPlugin) -> None:
47
+ self.next_worker_plugin = next
48
+
49
+ def configure_client(self, config: ClientConfig) -> ClientConfig:
50
+ config['data_converter'] = self._get_new_data_converter(config.get('data_converter'))
51
+ return self.next_client_plugin.configure_client(config)
45
52
 
46
53
  def configure_worker(self, config: WorkerConfig) -> WorkerConfig:
47
54
  runner = config.get('workflow_runner') # pyright: ignore[reportUnknownMemberType]
@@ -67,7 +74,35 @@ class PydanticAIPlugin(ClientPlugin, WorkerPlugin):
67
74
  PydanticUserError,
68
75
  ]
69
76
 
70
- return super().configure_worker(config)
77
+ return self.next_worker_plugin.configure_worker(config)
78
+
79
+ async def connect_service_client(self, config: ConnectConfig) -> ServiceClient:
80
+ return await self.next_client_plugin.connect_service_client(config)
81
+
82
+ async def run_worker(self, worker: Worker) -> None:
83
+ await self.next_worker_plugin.run_worker(worker)
84
+
85
+ def configure_replayer(self, config: ReplayerConfig) -> ReplayerConfig: # pragma: no cover
86
+ config['data_converter'] = self._get_new_data_converter(config.get('data_converter')) # pyright: ignore[reportUnknownMemberType]
87
+ return self.next_worker_plugin.configure_replayer(config)
88
+
89
+ def run_replayer(
90
+ self,
91
+ replayer: Replayer,
92
+ histories: AsyncIterator[WorkflowHistory],
93
+ ) -> AbstractAsyncContextManager[AsyncIterator[WorkflowReplayResult]]: # pragma: no cover
94
+ return self.next_worker_plugin.run_replayer(replayer, histories)
95
+
96
+ def _get_new_data_converter(self, converter: DataConverter | None) -> DataConverter:
97
+ if converter and converter.payload_converter_class not in (
98
+ DefaultPayloadConverter,
99
+ PydanticPayloadConverter,
100
+ ):
101
+ warnings.warn( # pragma: no cover
102
+ 'A non-default Temporal data converter was used which has been replaced with the Pydantic data converter.'
103
+ )
104
+
105
+ return pydantic_data_converter
71
106
 
72
107
 
73
108
  class AgentPlugin(WorkerPlugin):
@@ -76,8 +111,24 @@ class AgentPlugin(WorkerPlugin):
76
111
  def __init__(self, agent: TemporalAgent[Any, Any]):
77
112
  self.agent = agent
78
113
 
114
+ def init_worker_plugin(self, next: WorkerPlugin) -> None:
115
+ self.next_worker_plugin = next
116
+
79
117
  def configure_worker(self, config: WorkerConfig) -> WorkerConfig:
80
118
  activities: Sequence[Callable[..., Any]] = config.get('activities', []) # pyright: ignore[reportUnknownMemberType]
81
119
  # Activities are checked for name conflicts by Temporal.
82
120
  config['activities'] = [*activities, *self.agent.temporal_activities]
83
- return super().configure_worker(config)
121
+ return self.next_worker_plugin.configure_worker(config)
122
+
123
+ async def run_worker(self, worker: Worker) -> None:
124
+ await self.next_worker_plugin.run_worker(worker)
125
+
126
+ def configure_replayer(self, config: ReplayerConfig) -> ReplayerConfig: # pragma: no cover
127
+ return self.next_worker_plugin.configure_replayer(config)
128
+
129
+ def run_replayer(
130
+ self,
131
+ replayer: Replayer,
132
+ histories: AsyncIterator[WorkflowHistory],
133
+ ) -> AbstractAsyncContextManager[AsyncIterator[WorkflowReplayResult]]: # pragma: no cover
134
+ return self.next_worker_plugin.run_replayer(replayer, histories)
@@ -25,10 +25,13 @@ class LogfirePlugin(ClientPlugin):
25
25
  self.setup_logfire = setup_logfire
26
26
  self.metrics = metrics
27
27
 
28
+ def init_client_plugin(self, next: ClientPlugin) -> None:
29
+ self.next_client_plugin = next
30
+
28
31
  def configure_client(self, config: ClientConfig) -> ClientConfig:
29
32
  interceptors = config.get('interceptors', [])
30
33
  config['interceptors'] = [*interceptors, TracingInterceptor(get_tracer('temporalio'))]
31
- return super().configure_client(config)
34
+ return self.next_client_plugin.configure_client(config)
32
35
 
33
36
  async def connect_service_client(self, config: ConnectConfig) -> ServiceClient:
34
37
  logfire = self.setup_logfire()
@@ -45,4 +48,4 @@ class LogfirePlugin(ClientPlugin):
45
48
  telemetry=TelemetryConfig(metrics=OpenTelemetryConfig(url=metrics_url, headers=headers))
46
49
  )
47
50
 
48
- return await super().connect_service_client(config)
51
+ return await self.next_client_plugin.connect_service_client(config)
@@ -938,7 +938,7 @@ class ModelResponse:
938
938
  For OpenAI models, this may include 'logprobs', 'finish_reason', etc.
939
939
  """
940
940
 
941
- provider_request_id: str | None = None
941
+ provider_response_id: str | None = None
942
942
  """request ID as specified by the model provider. This can be used to track the specific request to the model."""
943
943
 
944
944
  def price(self) -> genai_types.PriceCalculation:
@@ -1026,9 +1026,14 @@ class ModelResponse:
1026
1026
  return self.provider_details
1027
1027
 
1028
1028
  @property
1029
- @deprecated('`vendor_id` is deprecated, use `provider_request_id` instead')
1029
+ @deprecated('`vendor_id` is deprecated, use `provider_response_id` instead')
1030
1030
  def vendor_id(self) -> str | None:
1031
- return self.provider_request_id
1031
+ return self.provider_response_id
1032
+
1033
+ @property
1034
+ @deprecated('`provider_request_id` is deprecated, use `provider_response_id` instead')
1035
+ def provider_request_id(self) -> str | None:
1036
+ return self.provider_response_id
1032
1037
 
1033
1038
  __repr__ = _utils.dataclasses_no_defaults_repr
1034
1039
 
@@ -7,6 +7,7 @@ specific LLM being used.
7
7
  from __future__ import annotations as _annotations
8
8
 
9
9
  import base64
10
+ import warnings
10
11
  from abc import ABC, abstractmethod
11
12
  from collections.abc import AsyncIterator, Iterator
12
13
  from contextlib import asynccontextmanager, contextmanager
@@ -684,19 +685,29 @@ def infer_model(model: Model | KnownModelName | str) -> Model: # noqa: C901
684
685
  try:
685
686
  provider, model_name = model.split(':', maxsplit=1)
686
687
  except ValueError:
688
+ provider = None
687
689
  model_name = model
688
- # TODO(Marcelo): We should deprecate this way.
689
690
  if model_name.startswith(('gpt', 'o1', 'o3')):
690
691
  provider = 'openai'
691
692
  elif model_name.startswith('claude'):
692
693
  provider = 'anthropic'
693
694
  elif model_name.startswith('gemini'):
694
695
  provider = 'google-gla'
696
+
697
+ if provider is not None:
698
+ warnings.warn(
699
+ f"Specifying a model name without a provider prefix is deprecated. Instead of {model_name!r}, use '{provider}:{model_name}'.",
700
+ DeprecationWarning,
701
+ )
695
702
  else:
696
703
  raise UserError(f'Unknown model: {model}')
697
704
 
698
- if provider == 'vertexai':
699
- provider = 'google-vertex' # pragma: no cover
705
+ if provider == 'vertexai': # pragma: no cover
706
+ warnings.warn(
707
+ "The 'vertexai' provider name is deprecated. Use 'google-vertex' instead.",
708
+ DeprecationWarning,
709
+ )
710
+ provider = 'google-vertex'
700
711
 
701
712
  if provider == 'cohere':
702
713
  from .cohere import CohereModel
@@ -330,7 +330,7 @@ class AnthropicModel(Model):
330
330
  items,
331
331
  usage=_map_usage(response),
332
332
  model_name=response.model,
333
- provider_request_id=response.id,
333
+ provider_response_id=response.id,
334
334
  provider_name=self._provider.name,
335
335
  )
336
336
 
@@ -301,9 +301,13 @@ class BedrockConverseModel(Model):
301
301
  input_tokens=response['usage']['inputTokens'],
302
302
  output_tokens=response['usage']['outputTokens'],
303
303
  )
304
- vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
304
+ response_id = response.get('ResponseMetadata', {}).get('RequestId', None)
305
305
  return ModelResponse(
306
- items, usage=u, model_name=self.model_name, provider_request_id=vendor_id, provider_name=self._provider.name
306
+ items,
307
+ usage=u,
308
+ model_name=self.model_name,
309
+ provider_response_id=response_id,
310
+ provider_name=self._provider.name,
307
311
  )
308
312
 
309
313
  @overload
@@ -690,7 +690,7 @@ def _process_response_from_parts(
690
690
  f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
691
691
  )
692
692
  return ModelResponse(
693
- parts=items, usage=usage, model_name=model_name, provider_request_id=vendor_id, provider_details=vendor_details
693
+ parts=items, usage=usage, model_name=model_name, provider_response_id=vendor_id, provider_details=vendor_details
694
694
  )
695
695
 
696
696
 
@@ -648,7 +648,7 @@ def _process_response_from_parts(
648
648
  parts=items,
649
649
  model_name=model_name,
650
650
  usage=usage,
651
- provider_request_id=vendor_id,
651
+ provider_response_id=vendor_id,
652
652
  provider_details=vendor_details,
653
653
  provider_name=provider_name,
654
654
  )
@@ -289,7 +289,7 @@ class GroqModel(Model):
289
289
  usage=_map_usage(response),
290
290
  model_name=response.model,
291
291
  timestamp=timestamp,
292
- provider_request_id=response.id,
292
+ provider_response_id=response.id,
293
293
  provider_name=self._provider.name,
294
294
  )
295
295
 
@@ -271,7 +271,7 @@ class HuggingFaceModel(Model):
271
271
  usage=_map_usage(response),
272
272
  model_name=response.model,
273
273
  timestamp=timestamp,
274
- provider_request_id=response.id,
274
+ provider_response_id=response.id,
275
275
  provider_name=self._provider.name,
276
276
  )
277
277
 
@@ -236,27 +236,36 @@ class InstrumentationSettings:
236
236
  if response.provider_details and 'finish_reason' in response.provider_details:
237
237
  output_message['finish_reason'] = response.provider_details['finish_reason']
238
238
  instructions = InstrumentedModel._get_instructions(input_messages) # pyright: ignore [reportPrivateUsage]
239
+ system_instructions_attributes = self.system_instructions_attributes(instructions)
239
240
  attributes = {
240
241
  'gen_ai.input.messages': json.dumps(self.messages_to_otel_messages(input_messages)),
241
242
  'gen_ai.output.messages': json.dumps([output_message]),
243
+ **system_instructions_attributes,
242
244
  'logfire.json_schema': json.dumps(
243
245
  {
244
246
  'type': 'object',
245
247
  'properties': {
246
248
  'gen_ai.input.messages': {'type': 'array'},
247
249
  'gen_ai.output.messages': {'type': 'array'},
248
- **({'gen_ai.system_instructions': {'type': 'array'}} if instructions else {}),
250
+ **(
251
+ {'gen_ai.system_instructions': {'type': 'array'}}
252
+ if system_instructions_attributes
253
+ else {}
254
+ ),
249
255
  'model_request_parameters': {'type': 'object'},
250
256
  },
251
257
  }
252
258
  ),
253
259
  }
254
- if instructions is not None:
255
- attributes['gen_ai.system_instructions'] = json.dumps(
256
- [_otel_messages.TextPart(type='text', content=instructions)]
257
- )
258
260
  span.set_attributes(attributes)
259
261
 
262
+ def system_instructions_attributes(self, instructions: str | None) -> dict[str, str]:
263
+ if instructions and self.include_content:
264
+ return {
265
+ 'gen_ai.system_instructions': json.dumps([_otel_messages.TextPart(type='text', content=instructions)]),
266
+ }
267
+ return {}
268
+
260
269
  def _emit_events(self, span: Span, events: list[Event]) -> None:
261
270
  if self.event_mode == 'logs':
262
271
  for event in events:
@@ -79,7 +79,7 @@ try:
79
79
  from mistralai.models.usermessage import UserMessage as MistralUserMessage
80
80
  from mistralai.types.basemodel import Unset as MistralUnset
81
81
  from mistralai.utils.eventstreaming import EventStreamAsync as MistralEventStreamAsync
82
- except ImportError as e: # pragma: no cover
82
+ except ImportError as e:
83
83
  raise ImportError(
84
84
  'Please install `mistral` to use the Mistral model, '
85
85
  'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
@@ -352,7 +352,7 @@ class MistralModel(Model):
352
352
  usage=_map_usage(response),
353
353
  model_name=response.model,
354
354
  timestamp=timestamp,
355
- provider_request_id=response.id,
355
+ provider_response_id=response.id,
356
356
  provider_name=self._provider.name,
357
357
  )
358
358
 
@@ -517,7 +517,7 @@ class OpenAIChatModel(Model):
517
517
  model_name=response.model,
518
518
  timestamp=timestamp,
519
519
  provider_details=vendor_details,
520
- provider_request_id=response.id,
520
+ provider_response_id=response.id,
521
521
  provider_name=self._provider.name,
522
522
  )
523
523
 
@@ -831,7 +831,7 @@ class OpenAIResponsesModel(Model):
831
831
  items,
832
832
  usage=_map_usage(response),
833
833
  model_name=response.model,
834
- provider_request_id=response.id,
834
+ provider_response_id=response.id,
835
835
  timestamp=timestamp,
836
836
  provider_name=self._provider.name,
837
837
  )
@@ -7,7 +7,7 @@ from datetime import datetime
7
7
  from typing import Generic, cast
8
8
 
9
9
  from pydantic import ValidationError
10
- from typing_extensions import TypeVar
10
+ from typing_extensions import TypeVar, deprecated
11
11
 
12
12
  from pydantic_ai._tool_manager import ToolManager
13
13
 
@@ -62,11 +62,11 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
62
62
  async for response in self.stream_responses(debounce_by=debounce_by):
63
63
  if self._raw_stream_response.final_result_event is not None:
64
64
  try:
65
- yield await self._validate_response(response, allow_partial=True)
65
+ yield await self.validate_response_output(response, allow_partial=True)
66
66
  except ValidationError:
67
67
  pass
68
68
  if self._raw_stream_response.final_result_event is not None: # pragma: no branch
69
- yield await self._validate_response(self._raw_stream_response.get())
69
+ yield await self.validate_response_output(self._raw_stream_response.get())
70
70
 
71
71
  async def stream_responses(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[_messages.ModelResponse]:
72
72
  """Asynchronously stream the (unvalidated) model responses for the agent."""
@@ -127,9 +127,11 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
127
127
  async for _ in self:
128
128
  pass
129
129
 
130
- return await self._validate_response(self._raw_stream_response.get())
130
+ return await self.validate_response_output(self._raw_stream_response.get())
131
131
 
132
- async def _validate_response(self, message: _messages.ModelResponse, *, allow_partial: bool = False) -> OutputDataT:
132
+ async def validate_response_output(
133
+ self, message: _messages.ModelResponse, *, allow_partial: bool = False
134
+ ) -> OutputDataT:
133
135
  """Validate a structured result message."""
134
136
  final_result_event = self._raw_stream_response.final_result_event
135
137
  if final_result_event is None:
@@ -245,9 +247,9 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
245
247
  """Whether the stream has all been received.
246
248
 
247
249
  This is set to `True` when one of
248
- [`stream`][pydantic_ai.result.StreamedRunResult.stream],
250
+ [`stream_output`][pydantic_ai.result.StreamedRunResult.stream_output],
249
251
  [`stream_text`][pydantic_ai.result.StreamedRunResult.stream_text],
250
- [`stream_structured`][pydantic_ai.result.StreamedRunResult.stream_structured] or
252
+ [`stream_responses`][pydantic_ai.result.StreamedRunResult.stream_responses] or
251
253
  [`get_output`][pydantic_ai.result.StreamedRunResult.get_output] completes.
252
254
  """
253
255
 
@@ -318,16 +320,21 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
318
320
  self.new_messages(output_tool_return_content=output_tool_return_content)
319
321
  )
320
322
 
323
+ @deprecated('`StreamedRunResult.stream` is deprecated, use `stream_output` instead.')
321
324
  async def stream(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
322
- """Stream the response as an async iterable.
325
+ async for output in self.stream_output(debounce_by=debounce_by):
326
+ yield output
327
+
328
+ async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
329
+ """Stream the output as an async iterable.
323
330
 
324
331
  The pydantic validator for structured data will be called in
325
332
  [partial mode](https://docs.pydantic.dev/dev/concepts/experimental/#partial-validation)
326
333
  on each iteration.
327
334
 
328
335
  Args:
329
- debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
330
- Debouncing is particularly important for long structured responses to reduce the overhead of
336
+ debounce_by: by how much (if at all) to debounce/group the output chunks by. `None` means no debouncing.
337
+ Debouncing is particularly important for long structured outputs to reduce the overhead of
331
338
  performing validation as each token is received.
332
339
 
333
340
  Returns:
@@ -354,8 +361,15 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
354
361
  yield text
355
362
  await self._marked_completed(self._stream_response.get())
356
363
 
364
+ @deprecated('`StreamedRunResult.stream_structured` is deprecated, use `stream_responses` instead.')
357
365
  async def stream_structured(
358
366
  self, *, debounce_by: float | None = 0.1
367
+ ) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
368
+ async for msg, last in self.stream_responses(debounce_by=debounce_by):
369
+ yield msg, last
370
+
371
+ async def stream_responses(
372
+ self, *, debounce_by: float | None = 0.1
359
373
  ) -> AsyncIterator[tuple[_messages.ModelResponse, bool]]:
360
374
  """Stream the response as an async iterable of Structured LLM Messages.
361
375
 
@@ -394,13 +408,17 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
394
408
  """Get the timestamp of the response."""
395
409
  return self._stream_response.timestamp()
396
410
 
411
+ @deprecated('`validate_structured_output` is deprecated, use `validate_response_output` instead.')
397
412
  async def validate_structured_output(
398
413
  self, message: _messages.ModelResponse, *, allow_partial: bool = False
414
+ ) -> OutputDataT:
415
+ return await self._stream_response.validate_response_output(message, allow_partial=allow_partial)
416
+
417
+ async def validate_response_output(
418
+ self, message: _messages.ModelResponse, *, allow_partial: bool = False
399
419
  ) -> OutputDataT:
400
420
  """Validate a structured result message."""
401
- return await self._stream_response._validate_response( # pyright: ignore[reportPrivateUsage]
402
- message, allow_partial=allow_partial
403
- )
421
+ return await self._stream_response.validate_response_output(message, allow_partial=allow_partial)
404
422
 
405
423
  async def _marked_completed(self, message: _messages.ModelResponse) -> None:
406
424
  self.is_complete = True
@@ -94,7 +94,7 @@ ag-ui = ["ag-ui-protocol>=0.1.8", "starlette>=0.45.3"]
94
94
  # Retries
95
95
  retries = ["tenacity>=8.2.3"]
96
96
  # Temporal
97
- temporal = ["temporalio==1.15.0"]
97
+ temporal = ["temporalio==1.16.0"]
98
98
 
99
99
  [tool.hatch.metadata]
100
100
  allow-direct-references = true