pydantic-ai-slim 1.0.2__tar.gz → 1.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pydantic-ai-slim might be problematic.
Files changed (126)
  1. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/PKG-INFO +5 -5
  2. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_output.py +19 -7
  3. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_parts_manager.py +8 -10
  4. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_tool_manager.py +18 -1
  5. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/ag_ui.py +32 -17
  6. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/agent/abstract.py +8 -0
  7. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/dbos/_agent.py +5 -2
  8. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_agent.py +1 -1
  9. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/messages.py +30 -6
  10. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/anthropic.py +55 -25
  11. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/bedrock.py +82 -31
  12. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/cohere.py +39 -13
  13. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/function.py +8 -1
  14. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/google.py +62 -33
  15. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/groq.py +35 -7
  16. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/huggingface.py +27 -5
  17. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/mistral.py +54 -20
  18. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/openai.py +88 -45
  19. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/openai.py +7 -0
  20. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/bedrock.py +9 -1
  21. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/settings.py +1 -0
  22. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pyproject.toml +2 -2
  23. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/.gitignore +0 -0
  24. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/LICENSE +0 -0
  25. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/README.md +0 -0
  26. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/__init__.py +0 -0
  27. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/__main__.py +0 -0
  28. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_a2a.py +0 -0
  29. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_agent_graph.py +0 -0
  30. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_cli.py +0 -0
  31. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_function_schema.py +0 -0
  32. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_griffe.py +0 -0
  33. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_mcp.py +0 -0
  34. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_otel_messages.py +0 -0
  35. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_run_context.py +0 -0
  36. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_system_prompt.py +0 -0
  37. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_thinking_part.py +0 -0
  38. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/_utils.py +0 -0
  39. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/agent/__init__.py +0 -0
  40. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/agent/wrapper.py +0 -0
  41. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/builtin_tools.py +0 -0
  42. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/common_tools/__init__.py +0 -0
  43. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  44. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/common_tools/tavily.py +0 -0
  45. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/direct.py +0 -0
  46. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/__init__.py +0 -0
  47. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  48. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  49. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  50. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  51. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  52. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  53. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  54. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  55. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  56. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  57. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  58. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/exceptions.py +0 -0
  59. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/ext/__init__.py +0 -0
  60. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/ext/aci.py +0 -0
  61. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/ext/langchain.py +0 -0
  62. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/format_prompt.py +0 -0
  63. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/mcp.py +0 -0
  64. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/__init__.py +0 -0
  65. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/fallback.py +0 -0
  66. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/gemini.py +0 -0
  67. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/instrumented.py +0 -0
  68. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/mcp_sampling.py +0 -0
  69. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/test.py +0 -0
  70. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/wrapper.py +0 -0
  71. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/output.py +0 -0
  72. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/__init__.py +0 -0
  73. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/_json_schema.py +0 -0
  74. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/amazon.py +0 -0
  75. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/anthropic.py +0 -0
  76. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/cohere.py +0 -0
  77. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/deepseek.py +0 -0
  78. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/google.py +0 -0
  79. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/grok.py +0 -0
  80. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/groq.py +0 -0
  81. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/harmony.py +0 -0
  82. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/meta.py +0 -0
  83. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/mistral.py +0 -0
  84. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/moonshotai.py +0 -0
  85. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/qwen.py +0 -0
  86. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/__init__.py +0 -0
  87. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/anthropic.py +0 -0
  88. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/azure.py +0 -0
  89. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/cerebras.py +0 -0
  90. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/cohere.py +0 -0
  91. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/deepseek.py +0 -0
  92. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/fireworks.py +0 -0
  93. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/github.py +0 -0
  94. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/google.py +0 -0
  95. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/google_gla.py +0 -0
  96. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/google_vertex.py +0 -0
  97. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/grok.py +0 -0
  98. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/groq.py +0 -0
  99. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/heroku.py +0 -0
  100. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/huggingface.py +0 -0
  101. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/litellm.py +0 -0
  102. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/mistral.py +0 -0
  103. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/moonshotai.py +0 -0
  104. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/ollama.py +0 -0
  105. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/openai.py +0 -0
  106. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/openrouter.py +0 -0
  107. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/together.py +0 -0
  108. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/vercel.py +0 -0
  109. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/py.typed +0 -0
  110. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/result.py +0 -0
  111. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/retries.py +0 -0
  112. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/run.py +0 -0
  113. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/tools.py +0 -0
  114. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/__init__.py +0 -0
  115. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/_dynamic.py +0 -0
  116. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/abstract.py +0 -0
  117. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/approval_required.py +0 -0
  118. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/combined.py +0 -0
  119. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/external.py +0 -0
  120. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/filtered.py +0 -0
  121. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/function.py +0 -0
  122. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/prefixed.py +0 -0
  123. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/prepared.py +0 -0
  124. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/renamed.py +0 -0
  125. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/wrapper.py +0 -0
  126. {pydantic_ai_slim-1.0.2 → pydantic_ai_slim-1.0.3}/pydantic_ai/usage.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-slim
- Version: 1.0.2
+ Version: 1.0.3
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
  Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
  Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
  Requires-Dist: griffe>=1.3.2
  Requires-Dist: httpx>=0.27
  Requires-Dist: opentelemetry-api>=1.28.0
- Requires-Dist: pydantic-graph==1.0.2
+ Requires-Dist: pydantic-graph==1.0.3
  Requires-Dist: pydantic>=2.10
  Requires-Dist: typing-inspection>=0.4.0
  Provides-Extra: a2a
@@ -51,13 +51,13 @@ Requires-Dist: prompt-toolkit>=3; extra == 'cli'
  Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
  Requires-Dist: rich>=13; extra == 'cli'
  Provides-Extra: cohere
- Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'cohere'
+ Requires-Dist: cohere>=5.17.0; (platform_system != 'Emscripten') and extra == 'cohere'
  Provides-Extra: dbos
  Requires-Dist: dbos>=1.13.0; extra == 'dbos'
  Provides-Extra: duckduckgo
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
  Provides-Extra: evals
- Requires-Dist: pydantic-evals==1.0.2; extra == 'evals'
+ Requires-Dist: pydantic-evals==1.0.3; extra == 'evals'
  Provides-Extra: google
  Requires-Dist: google-genai>=1.31.0; extra == 'google'
  Provides-Extra: groq
@@ -69,7 +69,7 @@ Requires-Dist: logfire[httpx]>=3.14.1; extra == 'logfire'
  Provides-Extra: mcp
  Requires-Dist: mcp>=1.12.3; extra == 'mcp'
  Provides-Extra: mistral
- Requires-Dist: mistralai>=1.9.2; extra == 'mistral'
+ Requires-Dist: mistralai>=1.9.10; extra == 'mistral'
  Provides-Extra: openai
  Requires-Dist: openai>=1.99.9; extra == 'openai'
  Provides-Extra: retries

pydantic_ai/_output.py

@@ -7,7 +7,7 @@ from collections.abc import Awaitable, Callable, Sequence
  from dataclasses import dataclass, field
  from typing import TYPE_CHECKING, Any, Generic, Literal, cast, overload

- from pydantic import TypeAdapter, ValidationError
+ from pydantic import Json, TypeAdapter, ValidationError
  from pydantic_core import SchemaValidator, to_json
  from typing_extensions import Self, TypedDict, TypeVar, assert_never

@@ -624,21 +624,33 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
  json_schema = self._function_schema.json_schema
  json_schema['description'] = self._function_schema.description
  else:
- type_adapter: TypeAdapter[Any]
+ json_schema_type_adapter: TypeAdapter[Any]
+ validation_type_adapter: TypeAdapter[Any]
  if _utils.is_model_like(output):
- type_adapter = TypeAdapter(output)
+ json_schema_type_adapter = validation_type_adapter = TypeAdapter(output)
  else:
  self.outer_typed_dict_key = 'response'
+ output_type: type[OutputDataT] = cast(type[OutputDataT], output)
+
  response_data_typed_dict = TypedDict( # noqa: UP013
  'response_data_typed_dict',
- {'response': cast(type[OutputDataT], output)}, # pyright: ignore[reportInvalidTypeForm]
+ {'response': output_type}, # pyright: ignore[reportInvalidTypeForm]
+ )
+ json_schema_type_adapter = TypeAdapter(response_data_typed_dict)
+
+ # More lenient validator: allow either the native type or a JSON string containing it
+ # i.e. `response: OutputDataT | Json[OutputDataT]`, as some models don't follow the schema correctly,
+ # e.g. `BedrockConverseModel('us.meta.llama3-2-11b-instruct-v1:0')`
+ response_validation_typed_dict = TypedDict( # noqa: UP013
+ 'response_validation_typed_dict',
+ {'response': output_type | Json[output_type]}, # pyright: ignore[reportInvalidTypeForm]
  )
- type_adapter = TypeAdapter(response_data_typed_dict)
+ validation_type_adapter = TypeAdapter(response_validation_typed_dict)

  # Really a PluggableSchemaValidator, but it's API-compatible
- self.validator = cast(SchemaValidator, type_adapter.validator)
+ self.validator = cast(SchemaValidator, validation_type_adapter.validator)
  json_schema = _utils.check_object_json_schema(
- type_adapter.json_schema(schema_generator=GenerateToolJsonSchema)
+ json_schema_type_adapter.json_schema(schema_generator=GenerateToolJsonSchema)
  )

  if self.outer_typed_dict_key:
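The lenient validation idea above can be illustrated in isolation. A minimal sketch using plain pydantic (the `Response` TypedDict and values below are illustrative, not library code) showing how `Json[...]` lets the same validator accept either the native type or a JSON string containing it:

```python
from typing import TypedDict

from pydantic import Json, TypeAdapter


class Response(TypedDict):
    # accept either a list of ints or a JSON string encoding one,
    # mirroring the `response: OutputDataT | Json[OutputDataT]` union above
    response: list[int] | Json[list[int]]


adapter = TypeAdapter(Response)

# a well-behaved model returns the native type...
print(adapter.validate_python({'response': [1, 2, 3]}))
# ...but a model that double-encodes the value as a JSON string still validates
print(adapter.validate_python({'response': '[1, 2, 3]'}))
```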
pydantic_ai/_parts_manager.py

@@ -156,6 +156,7 @@ class ModelResponsePartsManager:
  content: str | None = None,
  id: str | None = None,
  signature: str | None = None,
+ provider_name: str | None = None,
  ) -> ModelResponseStreamEvent:
  """Handle incoming thinking content, creating or updating a ThinkingPart in the manager as appropriate.

@@ -170,6 +171,7 @@ class ModelResponsePartsManager:
  content: The thinking content to append to the appropriate ThinkingPart.
  id: An optional id for the thinking part.
  signature: An optional signature for the thinking content.
+ provider_name: An optional provider name for the thinking part.

  Returns:
  A `PartStartEvent` if a new part was created, or a `PartDeltaEvent` if an existing part was updated.
@@ -199,7 +201,7 @@ class ModelResponsePartsManager:
  if content is not None:
  # There is no existing thinking part that should be updated, so create a new one
  new_part_index = len(self._parts)
- part = ThinkingPart(content=content, id=id, signature=signature)
+ part = ThinkingPart(content=content, id=id, signature=signature, provider_name=provider_name)
  if vendor_part_id is not None: # pragma: no branch
  self._vendor_id_to_part_index[vendor_part_id] = new_part_index
  self._parts.append(part)
@@ -207,16 +209,12 @@ class ModelResponsePartsManager:
  else:
  raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content')
  else:
- if content is not None:
- # Update the existing ThinkingPart with the new content delta
- existing_thinking_part, part_index = existing_thinking_part_and_index
- part_delta = ThinkingPartDelta(content_delta=content)
- self._parts[part_index] = part_delta.apply(existing_thinking_part)
- return PartDeltaEvent(index=part_index, delta=part_delta)
- elif signature is not None:
- # Update the existing ThinkingPart with the new signature delta
+ if content is not None or signature is not None:
+ # Update the existing ThinkingPart with the new content and/or signature delta
  existing_thinking_part, part_index = existing_thinking_part_and_index
- part_delta = ThinkingPartDelta(signature_delta=signature)
+ part_delta = ThinkingPartDelta(
+ content_delta=content, signature_delta=signature, provider_name=provider_name
+ )
  self._parts[part_index] = part_delta.apply(existing_thinking_part)
  return PartDeltaEvent(index=part_index, delta=part_delta)
  else:
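A hedged sketch of how a streaming model adapter drives this API (it assumes `ModelResponsePartsManager()` can be constructed with no arguments; the vendor part id and values are made up):

```python
from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()  # assumption: default construction works

# the first chunk for a vendor part id creates a ThinkingPart and yields a PartStartEvent
start_event = manager.handle_thinking_delta(
    vendor_part_id=0,
    content='Thinking',
    provider_name='anthropic',
)
print(start_event)

# later chunks with the same vendor part id update it and yield PartDeltaEvents,
# now carrying content, signature and provider name in a single delta
delta_event = manager.handle_thinking_delta(
    vendor_part_id=0,
    content=' some more...',
    signature='sig-abc',
    provider_name='anthropic',
)
print(delta_event)
```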
pydantic_ai/_tool_manager.py

@@ -1,6 +1,9 @@
  from __future__ import annotations

  import json
+ from collections.abc import Iterator
+ from contextlib import contextmanager
+ from contextvars import ContextVar
  from dataclasses import dataclass, field, replace
  from typing import Any, Generic

@@ -16,6 +19,8 @@ from .tools import ToolDefinition
  from .toolsets.abstract import AbstractToolset, ToolsetTool
  from .usage import UsageLimits

+ _sequential_tool_calls_ctx_var: ContextVar[bool] = ContextVar('sequential_tool_calls', default=False)
+

  @dataclass
  class ToolManager(Generic[AgentDepsT]):
@@ -30,6 +35,16 @@ class ToolManager(Generic[AgentDepsT]):
  failed_tools: set[str] = field(default_factory=set)
  """Names of tools that failed in this run step."""

+ @classmethod
+ @contextmanager
+ def sequential_tool_calls(cls) -> Iterator[None]:
+ """Run tool calls sequentially during the context."""
+ token = _sequential_tool_calls_ctx_var.set(True)
+ try:
+ yield
+ finally:
+ _sequential_tool_calls_ctx_var.reset(token)
+
  async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDepsT]:
  """Build a new tool manager for the next run step, carrying over the retries from the current run step."""
  if self.ctx is not None:
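The `ContextVar` plus `contextmanager` combination used here is a standard way to scope a process-wide flag to the current (possibly async) call stack and restore it afterwards. A self-contained sketch of the same pattern outside pydantic-ai:

```python
from collections.abc import Iterator
from contextlib import contextmanager
from contextvars import ContextVar

# module-level flag, defaulting to "not forced"
_sequential_ctx_var: ContextVar[bool] = ContextVar('sequential', default=False)


@contextmanager
def force_sequential() -> Iterator[None]:
    """Force sequential behaviour for code running inside this context."""
    token = _sequential_ctx_var.set(True)
    try:
        yield
    finally:
        # restore the previous value, even if an exception was raised
        _sequential_ctx_var.reset(token)


def should_run_sequentially() -> bool:
    return _sequential_ctx_var.get()


print(should_run_sequentially())  # False
with force_sequential():
    print(should_run_sequentially())  # True
print(should_run_sequentially())  # False
```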
@@ -58,7 +73,9 @@ class ToolManager(Generic[AgentDepsT]):

  def should_call_sequentially(self, calls: list[ToolCallPart]) -> bool:
  """Whether to require sequential tool calls for a list of tool calls."""
- return any(tool_def.sequential for call in calls if (tool_def := self.get_tool_def(call.tool_name)))
+ return _sequential_tool_calls_ctx_var.get() or any(
+ tool_def.sequential for call in calls if (tool_def := self.get_tool_def(call.tool_name))
+ )

  def get_tool_def(self, name: str) -> ToolDefinition | None:
  """Get the tool definition for a given tool name, or `None` if the tool is unknown."""
pydantic_ai/ag_ui.py

@@ -8,7 +8,7 @@ from __future__ import annotations

  import json
  import uuid
- from collections.abc import AsyncIterator, Callable, Iterable, Mapping, Sequence
+ from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Mapping, Sequence
  from dataclasses import Field, dataclass, replace
  from http import HTTPStatus
  from typing import (
@@ -17,14 +17,16 @@ from typing import (
  Final,
  Generic,
  Protocol,
+ TypeAlias,
  TypeVar,
  runtime_checkable,
  )

  from pydantic import BaseModel, ValidationError

+ from . import _utils
  from ._agent_graph import CallToolsNode, ModelRequestNode
- from .agent import AbstractAgent, AgentRun
+ from .agent import AbstractAgent, AgentRun, AgentRunResult
  from .exceptions import UserError
  from .messages import (
  FunctionToolResultEvent,
@@ -68,9 +70,8 @@ try:
  TextMessageContentEvent,
  TextMessageEndEvent,
  TextMessageStartEvent,
- # TODO: Enable once https://github.com/ag-ui-protocol/ag-ui/issues/289 is resolved.
- # ThinkingEndEvent,
- # ThinkingStartEvent,
+ ThinkingEndEvent,
+ ThinkingStartEvent,
  ThinkingTextMessageContentEvent,
  ThinkingTextMessageEndEvent,
  ThinkingTextMessageStartEvent,
@@ -108,6 +109,7 @@ __all__ = [
  'StateDeps',
  'StateHandler',
  'AGUIApp',
+ 'OnCompleteFunc',
  'handle_ag_ui_request',
  'run_ag_ui',
  ]
@@ -115,6 +117,9 @@ __all__ = [
  SSE_CONTENT_TYPE: Final[str] = 'text/event-stream'
  """Content type header value for Server-Sent Events (SSE)."""

+ OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]]
+ """Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async."""
+

  class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
  """ASGI application for running Pydantic AI agents with AG-UI protocol support."""
@@ -221,6 +226,7 @@ async def handle_ag_ui_request(
  usage: RunUsage | None = None,
  infer_name: bool = True,
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+ on_complete: OnCompleteFunc | None = None,
  ) -> Response:
  """Handle an AG-UI request by running the agent and returning a streaming response.

@@ -237,6 +243,8 @@ async def handle_ag_ui_request(
  usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
  infer_name: Whether to try to infer the agent name from the call frame if it's not set.
  toolsets: Optional additional toolsets for this run.
+ on_complete: Optional callback function called when the agent run completes successfully.
+ The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.

  Returns:
  A streaming Starlette response with AG-UI protocol events.
@@ -264,6 +272,7 @@ async def handle_ag_ui_request(
  usage=usage,
  infer_name=infer_name,
  toolsets=toolsets,
+ on_complete=on_complete,
  ),
  media_type=accept,
  )
@@ -282,6 +291,7 @@ async def run_ag_ui(
  usage: RunUsage | None = None,
  infer_name: bool = True,
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+ on_complete: OnCompleteFunc | None = None,
  ) -> AsyncIterator[str]:
  """Run the agent with the AG-UI run input and stream AG-UI protocol events.

@@ -299,6 +309,8 @@ async def run_ag_ui(
  usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
  infer_name: Whether to try to infer the agent name from the call frame if it's not set.
  toolsets: Optional additional toolsets for this run.
+ on_complete: Optional callback function called when the agent run completes successfully.
+ The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.

  Yields:
  Streaming event chunks encoded as strings according to the accept header value.
@@ -357,6 +369,12 @@ async def run_ag_ui(
  ) as run:
  async for event in _agent_stream(run):
  yield encoder.encode(event)
+
+ if on_complete is not None and run.result is not None:
+ if _utils.is_async_callable(on_complete):
+ await on_complete(run.result)
+ else:
+ await _utils.run_in_executor(on_complete, run.result)
  except _RunError as e:
  yield encoder.encode(
  RunErrorEvent(message=e.message, code=e.code),
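As a usage sketch (the Starlette wiring, route path, and agent definition below are assumptions, not part of this diff), the new `on_complete` parameter lets a caller inspect the finished run, for example to persist `all_messages()`:

```python
from typing import Any

from starlette.applications import Starlette
from starlette.requests import Request
from starlette.routing import Route

from pydantic_ai import Agent
from pydantic_ai.ag_ui import handle_ag_ui_request
from pydantic_ai.agent import AgentRunResult
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())  # any model works; TestModel keeps the sketch self-contained


async def on_complete(result: AgentRunResult[Any]) -> None:
    # called once the AG-UI run has finished successfully
    print(f'run produced {len(result.all_messages())} messages')


async def endpoint(request: Request):
    # handle_ag_ui_request returns a streaming Starlette response
    return await handle_ag_ui_request(agent, request, on_complete=on_complete)


app = Starlette(routes=[Route('/agent', endpoint, methods=['POST'])])
```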
@@ -396,10 +414,9 @@ async def _agent_stream(run: AgentRun[AgentDepsT, Any]) -> AsyncIterator[BaseEve
  yield stream_ctx.part_end
  stream_ctx.part_end = None
  if stream_ctx.thinking:
- # TODO: Enable once https://github.com/ag-ui-protocol/ag-ui/issues/289 is resolved.
- # yield ThinkingEndEvent(
- # type=EventType.THINKING_END,
- # )
+ yield ThinkingEndEvent(
+ type=EventType.THINKING_END,
+ )
  stream_ctx.thinking = False
  elif isinstance(node, CallToolsNode):
  async with node.stream(run.ctx) as handle_stream:
@@ -431,10 +448,9 @@ async def _handle_model_request_event( # noqa: C901
  part = agent_event.part
  if isinstance(part, ThinkingPart): # pragma: no branch
  if not stream_ctx.thinking:
- # TODO: Enable once https://github.com/ag-ui-protocol/ag-ui/issues/289 is resolved.
- # yield ThinkingStartEvent(
- # type=EventType.THINKING_START,
- # )
+ yield ThinkingStartEvent(
+ type=EventType.THINKING_START,
+ )
  stream_ctx.thinking = True

  if part.content:
@@ -450,10 +466,9 @@ async def _handle_model_request_event( # noqa: C901
  )
  else:
  if stream_ctx.thinking:
- # TODO: Enable once https://github.com/ag-ui-protocol/ag-ui/issues/289 is resolved.
- # yield ThinkingEndEvent(
- # type=EventType.THINKING_END,
- # )
+ yield ThinkingEndEvent(
+ type=EventType.THINKING_END,
+ )
  stream_ctx.thinking = False

  if isinstance(part, TextPart):

pydantic_ai/agent/abstract.py

@@ -21,6 +21,7 @@ from .. import (
  result,
  usage as _usage,
  )
+ from .._tool_manager import ToolManager
  from ..output import OutputDataT, OutputSpec
  from ..result import AgentStream, FinalResult, StreamedRunResult
  from ..run import AgentRun, AgentRunResult
@@ -714,6 +715,13 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
  self.name = name
  return

+ @staticmethod
+ @contextmanager
+ def sequential_tool_calls() -> Iterator[None]:
+ """Run tool calls sequentially during the context."""
+ with ToolManager.sequential_tool_calls():
+ yield
+
  @staticmethod
  def is_model_request_node(
  node: _agent_graph.AgentNode[T, S] | End[result.FinalResult[S]],
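A usage sketch for the new public context manager (the agent, model, and tools below are illustrative assumptions): while the context is active, tool calls issued by the model are executed one at a time instead of concurrently.

```python
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())


@agent.tool_plain
def first_tool() -> str:
    return 'first'


@agent.tool_plain
def second_tool() -> str:
    return 'second'


# While the context manager is active, parallel tool calls from the model
# are executed sequentially rather than concurrently.
with agent.sequential_tool_calls():
    result = agent.run_sync('call both tools')

print(result.output)
```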
pydantic_ai/durable_exec/dbos/_agent.py

@@ -13,7 +13,6 @@ from pydantic_ai import (
  models,
  usage as _usage,
  )
- from pydantic_ai._run_context import AgentDepsT
  from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent
  from pydantic_ai.exceptions import UserError
  from pydantic_ai.mcp import MCPServer
@@ -22,6 +21,7 @@ from pydantic_ai.output import OutputDataT, OutputSpec
  from pydantic_ai.result import StreamedRunResult
  from pydantic_ai.settings import ModelSettings
  from pydantic_ai.tools import (
+ AgentDepsT,
  DeferredToolResults,
  RunContext,
  Tool,
@@ -218,7 +218,10 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
  @contextmanager
  def _dbos_overrides(self) -> Iterator[None]:
  # Override with DBOSModel and DBOSMCPServer in the toolsets.
- with super().override(model=self._model, toolsets=self._toolsets, tools=[]):
+ with (
+ super().override(model=self._model, toolsets=self._toolsets, tools=[]),
+ self.sequential_tool_calls(),
+ ):
  yield

  @overload

pydantic_ai/durable_exec/temporal/_agent.py

@@ -21,7 +21,6 @@ from pydantic_ai import (
  models,
  usage as _usage,
  )
- from pydantic_ai._run_context import AgentDepsT
  from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent
  from pydantic_ai.exceptions import UserError
  from pydantic_ai.models import Model
@@ -29,6 +28,7 @@ from pydantic_ai.output import OutputDataT, OutputSpec
  from pydantic_ai.result import StreamedRunResult
  from pydantic_ai.settings import ModelSettings
  from pydantic_ai.tools import (
+ AgentDepsT,
  DeferredToolResults,
  RunContext,
  Tool,

pydantic_ai/messages.py

@@ -895,7 +895,18 @@ class ThinkingPart:
  signature: str | None = None
  """The signature of the thinking.

- The signature is only available on the Anthropic models.
+ Supported by:
+
+ * Anthropic (corresponds to the `signature` field)
+ * Bedrock (corresponds to the `signature` field)
+ * Google (corresponds to the `thought_signature` field)
+ * OpenAI (corresponds to the `encrypted_content` field)
+ """
+
+ provider_name: str | None = None
+ """The name of the provider that generated the response.
+
+ Signatures are only sent back to the same provider.
  """

  part_kind: Literal['thinking'] = 'thinking'
@@ -980,7 +991,10 @@ class BuiltinToolCallPart(BaseToolCallPart):
  _: KW_ONLY

  provider_name: str | None = None
- """The name of the provider that generated the response."""
+ """The name of the provider that generated the response.
+
+ Built-in tool calls are only sent back to the same provider.
+ """

  part_kind: Literal['builtin-tool-call'] = 'builtin-tool-call'
  """Part type identifier, this is available on all parts as a discriminator."""
@@ -1198,6 +1212,12 @@ class ThinkingPartDelta:
  Note this is never treated as a delta — it can replace None.
  """

+ provider_name: str | None = None
+ """Optional provider name for the thinking part.
+
+ Signatures are only sent back to the same provider.
+ """
+
  part_delta_kind: Literal['thinking'] = 'thinking'
  """Part delta type identifier, used as a discriminator."""

@@ -1222,14 +1242,18 @@ class ThinkingPartDelta:
  if isinstance(part, ThinkingPart):
  new_content = part.content + self.content_delta if self.content_delta else part.content
  new_signature = self.signature_delta if self.signature_delta is not None else part.signature
- return replace(part, content=new_content, signature=new_signature)
+ new_provider_name = self.provider_name if self.provider_name is not None else part.provider_name
+ return replace(part, content=new_content, signature=new_signature, provider_name=new_provider_name)
  elif isinstance(part, ThinkingPartDelta):
  if self.content_delta is None and self.signature_delta is None:
  raise ValueError('Cannot apply ThinkingPartDelta with no content or signature')
- if self.signature_delta is not None:
- return replace(part, signature_delta=self.signature_delta)
  if self.content_delta is not None:
- return replace(part, content_delta=self.content_delta)
+ part = replace(part, content_delta=(part.content_delta or '') + self.content_delta)
+ if self.signature_delta is not None:
+ part = replace(part, signature_delta=self.signature_delta)
+ if self.provider_name is not None:
+ part = replace(part, provider_name=self.provider_name)
+ return part
  raise ValueError( # pragma: no cover
  f'Cannot apply ThinkingPartDeltas to non-ThinkingParts or non-ThinkingPartDeltas ({part=}, {self=})'
  )
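To make the delta semantics above concrete, a small sketch using the public message classes (the content, signature, and provider values are made up): content deltas are appended, while a signature delta and provider name fill or replace the corresponding fields.

```python
from pydantic_ai.messages import ThinkingPart, ThinkingPartDelta

part = ThinkingPart(content='Let me think', provider_name='anthropic')

delta = ThinkingPartDelta(
    content_delta=' about this.',
    signature_delta='sig-123',
    provider_name='anthropic',
)
updated = delta.apply(part)

print(updated.content)        # 'Let me think about this.'
print(updated.signature)      # 'sig-123'
print(updated.provider_name)  # 'anthropic'
```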
pydantic_ai/models/anthropic.py

@@ -1,11 +1,10 @@
  from __future__ import annotations as _annotations

  import io
- import warnings
  from collections.abc import AsyncGenerator, AsyncIterable, AsyncIterator
  from contextlib import asynccontextmanager
  from dataclasses import dataclass, field
- from datetime import datetime, timezone
+ from datetime import datetime
  from typing import Any, Literal, cast, overload

  from typing_extensions import assert_never
@@ -78,6 +77,7 @@ try:
  BetaRawMessageStopEvent,
  BetaRawMessageStreamEvent,
  BetaRedactedThinkingBlock,
+ BetaRedactedThinkingBlockParam,
  BetaServerToolUseBlock,
  BetaServerToolUseBlockParam,
  BetaSignatureDelta,
@@ -305,7 +305,7 @@ class AnthropicModel(Model):
  elif isinstance(item, BetaWebSearchToolResultBlock | BetaCodeExecutionToolResultBlock):
  items.append(
  BuiltinToolReturnPart(
- provider_name='anthropic',
+ provider_name=self.system,
  tool_name=item.type,
  content=item.content,
  tool_call_id=item.tool_use_id,
@@ -314,20 +314,18 @@ class AnthropicModel(Model):
  elif isinstance(item, BetaServerToolUseBlock):
  items.append(
  BuiltinToolCallPart(
- provider_name='anthropic',
+ provider_name=self.system,
  tool_name=item.name,
  args=cast(dict[str, Any], item.input),
  tool_call_id=item.id,
  )
  )
- elif isinstance(item, BetaRedactedThinkingBlock): # pragma: no cover
- warnings.warn(
- 'Pydantic AI currently does not handle redacted thinking blocks. '
- 'If you have a suggestion on how we should handle them, please open an issue.',
- UserWarning,
+ elif isinstance(item, BetaRedactedThinkingBlock):
+ items.append(
+ ThinkingPart(id='redacted_thinking', content='', signature=item.data, provider_name=self.system)
  )
  elif isinstance(item, BetaThinkingBlock):
- items.append(ThinkingPart(content=item.thinking, signature=item.signature))
+ items.append(ThinkingPart(content=item.thinking, signature=item.signature, provider_name=self.system))
  else:
  assert isinstance(item, BetaToolUseBlock), f'unexpected item type {type(item)}'
  items.append(
@@ -362,13 +360,13 @@ class AnthropicModel(Model):
  if isinstance(first_chunk, _utils.Unset):
  raise UnexpectedModelBehavior('Streamed response ended without content or tool calls') # pragma: no cover

- # Since Anthropic doesn't provide a timestamp in the message, we'll use the current time
- timestamp = datetime.now(tz=timezone.utc)
+ assert isinstance(first_chunk, BetaRawMessageStartEvent)
+
  return AnthropicStreamedResponse(
  model_request_parameters=model_request_parameters,
- _model_name=self._model_name,
+ _model_name=first_chunk.message.model,
  _response=peekable_response,
- _timestamp=timestamp,
+ _timestamp=_utils.now_utc(),
  _provider_name=self._provider.name,
  )

@@ -445,6 +443,7 @@ class AnthropicModel(Model):
  | BetaWebSearchToolResultBlockParam
  | BetaCodeExecutionToolResultBlockParam
  | BetaThinkingBlockParam
+ | BetaRedactedThinkingBlockParam
  ] = []
  for response_part in m.parts:
  if isinstance(response_part, TextPart):
@@ -459,15 +458,33 @@ class AnthropicModel(Model):
  )
  assistant_content_params.append(tool_use_block_param)
  elif isinstance(response_part, ThinkingPart):
- # NOTE: We only send thinking part back for Anthropic, otherwise they raise an error.
- if response_part.signature is not None: # pragma: no branch
+ if (
+ response_part.provider_name == self.system and response_part.signature is not None
+ ): # pragma: no branch
+ if response_part.id == 'redacted_thinking':
+ assistant_content_params.append(
+ BetaRedactedThinkingBlockParam(
+ data=response_part.signature,
+ type='redacted_thinking',
+ )
+ )
+ else:
+ assistant_content_params.append(
+ BetaThinkingBlockParam(
+ thinking=response_part.content,
+ signature=response_part.signature,
+ type='thinking',
+ )
+ )
+ elif response_part.content: # pragma: no branch
+ start_tag, end_tag = self.profile.thinking_tags
  assistant_content_params.append(
- BetaThinkingBlockParam(
- thinking=response_part.content, signature=response_part.signature, type='thinking'
+ BetaTextBlockParam(
+ text='\n'.join([start_tag, response_part.content, end_tag]), type='text'
  )
  )
  elif isinstance(response_part, BuiltinToolCallPart):
- if response_part.provider_name == 'anthropic':
+ if response_part.provider_name == self.system:
  server_tool_use_block_param = BetaServerToolUseBlockParam(
  id=_guard_tool_call_id(t=response_part),
  type='server_tool_use',
@@ -476,7 +493,7 @@ class AnthropicModel(Model):
  )
  assistant_content_params.append(server_tool_use_block_param)
  elif isinstance(response_part, BuiltinToolReturnPart):
- if response_part.provider_name == 'anthropic':
+ if response_part.provider_name == self.system:
  tool_use_id = _guard_tool_call_id(t=response_part)
  if response_part.tool_name == 'web_search_tool_result':
  server_tool_result_block_param = BetaWebSearchToolResultBlockParam(
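Restated as a standalone, hypothetical helper (the function, its name, and the plain dicts below are illustrative only, not library code), the new replay logic for `ThinkingPart`s is: with a matching provider name and a signature, send the original thinking block back (redacted or not); otherwise fall back to plain text wrapped in the profile's thinking tags, and drop parts with no content.

```python
from __future__ import annotations

from pydantic_ai.messages import ThinkingPart


def thinking_part_to_anthropic_content(part: ThinkingPart, system: str = 'anthropic') -> dict | None:
    """Hypothetical restatement of the branching above, using plain dicts instead of the SDK param types."""
    if part.provider_name == system and part.signature is not None:
        if part.id == 'redacted_thinking':
            return {'type': 'redacted_thinking', 'data': part.signature}
        return {'type': 'thinking', 'thinking': part.content, 'signature': part.signature}
    elif part.content:
        # thinking from another provider (or without a signature) is replayed as tagged plain text
        start_tag, end_tag = '<think>', '</think>'  # assumed tags; the real code reads self.profile.thinking_tags
        return {'type': 'text', 'text': '\n'.join([start_tag, part.content, end_tag])}
    return None


print(thinking_part_to_anthropic_content(ThinkingPart(content='hmm', signature='sig', provider_name='anthropic')))
print(thinking_part_to_anthropic_content(ThinkingPart(content='hmm', provider_name='openai')))
```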
@@ -609,15 +626,24 @@ class AnthropicStreamedResponse(StreamedResponse):
  current_block = event.content_block
  if isinstance(current_block, BetaTextBlock) and current_block.text:
  maybe_event = self._parts_manager.handle_text_delta(
- vendor_part_id='content', content=current_block.text
+ vendor_part_id=event.index, content=current_block.text
  )
  if maybe_event is not None: # pragma: no branch
  yield maybe_event
  elif isinstance(current_block, BetaThinkingBlock):
  yield self._parts_manager.handle_thinking_delta(
- vendor_part_id='thinking',
+ vendor_part_id=event.index,
  content=current_block.thinking,
  signature=current_block.signature,
+ provider_name=self.provider_name,
+ )
+ elif isinstance(current_block, BetaRedactedThinkingBlock):
+ yield self._parts_manager.handle_thinking_delta(
+ vendor_part_id=event.index,
+ id='redacted_thinking',
+ content='',
+ signature=current_block.data,
+ provider_name=self.provider_name,
  )
  elif isinstance(current_block, BetaToolUseBlock):
  maybe_event = self._parts_manager.handle_tool_call_delta(
@@ -634,17 +660,21 @@ class AnthropicStreamedResponse(StreamedResponse):
  elif isinstance(event, BetaRawContentBlockDeltaEvent):
  if isinstance(event.delta, BetaTextDelta):
  maybe_event = self._parts_manager.handle_text_delta(
- vendor_part_id='content', content=event.delta.text
+ vendor_part_id=event.index, content=event.delta.text
  )
  if maybe_event is not None: # pragma: no branch
  yield maybe_event
  elif isinstance(event.delta, BetaThinkingDelta):
  yield self._parts_manager.handle_thinking_delta(
- vendor_part_id='thinking', content=event.delta.thinking
+ vendor_part_id=event.index,
+ content=event.delta.thinking,
+ provider_name=self.provider_name,
  )
  elif isinstance(event.delta, BetaSignatureDelta):
  yield self._parts_manager.handle_thinking_delta(
- vendor_part_id='thinking', signature=event.delta.signature
+ vendor_part_id=event.index,
+ signature=event.delta.signature,
+ provider_name=self.provider_name,
  )
  elif (
  current_block