pydantic-ai-slim 1.0.9__tar.gz → 1.0.10__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (128)
  1. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_agent_graph.py +59 -53
  3. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/agent/__init__.py +2 -1
  4. pydantic_ai_slim-1.0.10/pydantic_ai/format_prompt.py +205 -0
  5. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/messages.py +1 -5
  6. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/google.py +27 -17
  7. pydantic_ai_slim-1.0.9/pydantic_ai/format_prompt.py +0 -113
  8. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/.gitignore +0 -0
  9. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/LICENSE +0 -0
  10. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/README.md +0 -0
  11. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/__init__.py +0 -0
  12. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/__main__.py +0 -0
  13. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_a2a.py +0 -0
  14. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_cli.py +0 -0
  15. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_function_schema.py +0 -0
  16. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_griffe.py +0 -0
  17. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_mcp.py +0 -0
  18. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_otel_messages.py +0 -0
  19. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_output.py +0 -0
  20. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_parts_manager.py +0 -0
  21. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_run_context.py +0 -0
  22. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_system_prompt.py +0 -0
  23. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_thinking_part.py +0 -0
  24. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_tool_manager.py +0 -0
  25. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/_utils.py +0 -0
  26. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/ag_ui.py +0 -0
  27. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/agent/abstract.py +0 -0
  28. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/agent/wrapper.py +0 -0
  29. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/builtin_tools.py +0 -0
  30. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/common_tools/__init__.py +0 -0
  31. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  32. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/common_tools/tavily.py +0 -0
  33. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/direct.py +0 -0
  34. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/__init__.py +0 -0
  35. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  36. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
  37. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  38. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  39. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  40. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  41. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  42. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  43. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  44. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  45. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  46. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  47. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  48. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/exceptions.py +0 -0
  49. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/ext/__init__.py +0 -0
  50. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/ext/aci.py +0 -0
  51. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/ext/langchain.py +0 -0
  52. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/mcp.py +0 -0
  53. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/__init__.py +0 -0
  54. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/anthropic.py +0 -0
  55. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/bedrock.py +0 -0
  56. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/cohere.py +0 -0
  57. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/fallback.py +0 -0
  58. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/function.py +0 -0
  59. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/gemini.py +0 -0
  60. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/groq.py +0 -0
  61. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/huggingface.py +0 -0
  62. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/instrumented.py +0 -0
  63. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/mcp_sampling.py +0 -0
  64. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/mistral.py +0 -0
  65. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/openai.py +0 -0
  66. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/test.py +0 -0
  67. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/models/wrapper.py +0 -0
  68. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/output.py +0 -0
  69. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/__init__.py +0 -0
  70. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/_json_schema.py +0 -0
  71. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/amazon.py +0 -0
  72. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/anthropic.py +0 -0
  73. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/cohere.py +0 -0
  74. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/deepseek.py +0 -0
  75. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/google.py +0 -0
  76. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/grok.py +0 -0
  77. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/groq.py +0 -0
  78. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/harmony.py +0 -0
  79. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/meta.py +0 -0
  80. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/mistral.py +0 -0
  81. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/moonshotai.py +0 -0
  82. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/openai.py +0 -0
  83. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/profiles/qwen.py +0 -0
  84. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/__init__.py +0 -0
  85. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/anthropic.py +0 -0
  86. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/azure.py +0 -0
  87. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/bedrock.py +0 -0
  88. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/cerebras.py +0 -0
  89. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/cohere.py +0 -0
  90. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/deepseek.py +0 -0
  91. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/fireworks.py +0 -0
  92. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/gateway.py +0 -0
  93. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/github.py +0 -0
  94. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/google.py +0 -0
  95. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/google_gla.py +0 -0
  96. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/google_vertex.py +0 -0
  97. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/grok.py +0 -0
  98. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/groq.py +0 -0
  99. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/heroku.py +0 -0
  100. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/huggingface.py +0 -0
  101. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/litellm.py +0 -0
  102. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/mistral.py +0 -0
  103. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/moonshotai.py +0 -0
  104. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/ollama.py +0 -0
  105. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/openai.py +0 -0
  106. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/openrouter.py +0 -0
  107. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/together.py +0 -0
  108. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/providers/vercel.py +0 -0
  109. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/py.typed +0 -0
  110. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/result.py +0 -0
  111. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/retries.py +0 -0
  112. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/run.py +0 -0
  113. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/settings.py +0 -0
  114. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/tools.py +0 -0
  115. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/__init__.py +0 -0
  116. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/_dynamic.py +0 -0
  117. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/abstract.py +0 -0
  118. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/approval_required.py +0 -0
  119. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/combined.py +0 -0
  120. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/external.py +0 -0
  121. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/filtered.py +0 -0
  122. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/function.py +0 -0
  123. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/prefixed.py +0 -0
  124. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/prepared.py +0 -0
  125. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/renamed.py +0 -0
  126. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/toolsets/wrapper.py +0 -0
  127. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pydantic_ai/usage.py +0 -0
  128. {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.10}/pyproject.toml +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-slim
- Version: 1.0.9
+ Version: 1.0.10
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
  Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
  Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
  Requires-Dist: griffe>=1.3.2
  Requires-Dist: httpx>=0.27
  Requires-Dist: opentelemetry-api>=1.28.0
- Requires-Dist: pydantic-graph==1.0.9
+ Requires-Dist: pydantic-graph==1.0.10
  Requires-Dist: pydantic>=2.10
  Requires-Dist: typing-inspection>=0.4.0
  Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
  Provides-Extra: duckduckgo
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
  Provides-Extra: evals
- Requires-Dist: pydantic-evals==1.0.9; extra == 'evals'
+ Requires-Dist: pydantic-evals==1.0.10; extra == 'evals'
  Provides-Extra: google
  Requires-Dist: google-genai>=1.31.0; extra == 'google'
  Provides-Extra: groq
@@ -547,7 +547,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
  async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa: C901
  text = ''
  tool_calls: list[_messages.ToolCallPart] = []
- thinking_parts: list[_messages.ThinkingPart] = []
+ invisible_parts: bool = False

  for part in self.model_response.parts:
  if isinstance(part, _messages.TextPart):
@@ -558,11 +558,13 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
  # Text parts before a built-in tool call are essentially thoughts,
  # not part of the final result output, so we reset the accumulated text
  text = ''
+ invisible_parts = True
  yield _messages.BuiltinToolCallEvent(part) # pyright: ignore[reportDeprecated]
  elif isinstance(part, _messages.BuiltinToolReturnPart):
+ invisible_parts = True
  yield _messages.BuiltinToolResultEvent(part) # pyright: ignore[reportDeprecated]
  elif isinstance(part, _messages.ThinkingPart):
- thinking_parts.append(part)
+ invisible_parts = True
  else:
  assert_never(part)

@@ -570,43 +572,51 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
  # In the future, we'd consider making this configurable at the agent or run level.
  # This accounts for cases like anthropic returns that might contain a text response
  # and a tool call response, where the text response just indicates the tool call will happen.
- if tool_calls:
- async for event in self._handle_tool_calls(ctx, tool_calls):
- yield event
- elif text:
- # No events are emitted during the handling of text responses, so we don't need to yield anything
- self._next_node = await self._handle_text_response(ctx, text)
- elif thinking_parts:
- # handle thinking-only responses (responses that contain only ThinkingPart instances)
- # this can happen with models that support thinking mode when they don't provide
- # actionable output alongside their thinking content.
- self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
- _messages.ModelRequest(
- parts=[_messages.RetryPromptPart('Responses without text or tool calls are not permitted.')]
+ try:
+ if tool_calls:
+ async for event in self._handle_tool_calls(ctx, tool_calls):
+ yield event
+ elif text:
+ # No events are emitted during the handling of text responses, so we don't need to yield anything
+ self._next_node = await self._handle_text_response(ctx, text)
+ elif invisible_parts:
+ # handle responses with only thinking or built-in tool parts.
+ # this can happen with models that support thinking mode when they don't provide
+ # actionable output alongside their thinking content. so we tell the model to try again.
+ m = _messages.RetryPromptPart(
+ content='Responses without text or tool calls are not permitted.',
  )
- )
- else:
- # we got an empty response with no tool calls, text, or thinking
- # this sometimes happens with anthropic (and perhaps other models)
- # when the model has already returned text along side tool calls
- # in this scenario, if text responses are allowed, we return text from the most recent model
- # response, if any
- if isinstance(ctx.deps.output_schema, _output.TextOutputSchema):
- for message in reversed(ctx.state.message_history):
- if isinstance(message, _messages.ModelResponse):
- text = ''
- for part in message.parts:
- if isinstance(part, _messages.TextPart):
- text += part.content
- elif isinstance(part, _messages.BuiltinToolCallPart):
- # Text parts before a built-in tool call are essentially thoughts,
- # not part of the final result output, so we reset the accumulated text
- text = '' # pragma: no cover
- if text:
- self._next_node = await self._handle_text_response(ctx, text)
- return
-
- raise exceptions.UnexpectedModelBehavior('Received empty model response')
+ raise ToolRetryError(m)
+ else:
+ # we got an empty response with no tool calls, text, thinking, or built-in tool calls.
+ # this sometimes happens with anthropic (and perhaps other models)
+ # when the model has already returned text along side tool calls
+ # in this scenario, if text responses are allowed, we return text from the most recent model
+ # response, if any
+ if isinstance(ctx.deps.output_schema, _output.TextOutputSchema):
+ for message in reversed(ctx.state.message_history):
+ if isinstance(message, _messages.ModelResponse):
+ text = ''
+ for part in message.parts:
+ if isinstance(part, _messages.TextPart):
+ text += part.content
+ elif isinstance(part, _messages.BuiltinToolCallPart):
+ # Text parts before a built-in tool call are essentially thoughts,
+ # not part of the final result output, so we reset the accumulated text
+ text = '' # pragma: no cover
+ if text:
+ self._next_node = await self._handle_text_response(ctx, text)
+ return
+
+ # Go back to the model request node with an empty request, which means we'll essentially
+ # resubmit the most recent request that resulted in an empty response,
+ # as the empty response and request will not create any items in the API payload,
+ # in the hope the model will return a non-empty response this time.
+ ctx.state.increment_retries(ctx.deps.max_result_retries)
+ self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+ except ToolRetryError as e:
+ ctx.state.increment_retries(ctx.deps.max_result_retries, e)
+ self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))

  self._events_iterator = _run_stream()

@@ -666,23 +676,19 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
  text: str,
  ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
  output_schema = ctx.deps.output_schema
- try:
- run_context = build_run_context(ctx)
- if isinstance(output_schema, _output.TextOutputSchema):
- result_data = await output_schema.process(text, run_context)
- else:
- m = _messages.RetryPromptPart(
- content='Plain text responses are not permitted, please include your response in a tool call',
- )
- raise ToolRetryError(m)
+ run_context = build_run_context(ctx)

- for validator in ctx.deps.output_validators:
- result_data = await validator.validate(result_data, run_context)
- except ToolRetryError as e:
- ctx.state.increment_retries(ctx.deps.max_result_retries, e)
- return ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
+ if isinstance(output_schema, _output.TextOutputSchema):
+ result_data = await output_schema.process(text, run_context)
  else:
- return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+ m = _messages.RetryPromptPart(
+ content='Plain text responses are not permitted, please include your response in a tool call',
+ )
+ raise ToolRetryError(m)
+
+ for validator in ctx.deps.output_validators:
+ result_data = await validator.validate(result_data, run_context)
+ return self._handle_final_result(ctx, result.FinalResult(result_data), [])

  __repr__ = dataclasses_no_defaults_repr

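Taken together, the `_agent_graph.py` hunks change how `CallToolsNode` reacts to responses with no text and no tool calls: thinking-only or built-in-tool-only responses now trigger a retry prompt (counted against `max_result_retries`), and completely empty responses re-submit an empty request instead of raising `UnexpectedModelBehavior`. Below is a standalone sketch of that dispatch order using illustrative names, not the library's internal API:

```python
from dataclasses import dataclass, field


@dataclass
class ResponseSummary:
    """Illustrative stand-in for the parts of a model response."""

    text: str = ''
    tool_calls: list[str] = field(default_factory=list)
    invisible_parts: bool = False  # thinking or built-in tool parts only


def next_action(summary: ResponseSummary) -> str:
    """Mirror the new dispatch order: tool calls, then text, then retries."""
    if summary.tool_calls:
        return 'handle tool calls'
    if summary.text:
        return 'handle text response'
    if summary.invisible_parts:
        # thinking-only / built-in-tool-only response: ask the model to try again
        return 'retry: responses without text or tool calls are not permitted'
    # completely empty response: resubmit an empty request, counting it as a retry
    return 'resubmit empty request'


print(next_action(ResponseSummary(invisible_parts=True)))
#> retry: responses without text or tool calls are not permitted
```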
@@ -259,7 +259,8 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
  name: The name of the agent, used for logging. If `None`, we try to infer the agent name from the call frame
  when the agent is first run.
  model_settings: Optional model request settings to use for this agent's runs, by default.
- retries: The default number of retries to allow before raising an error.
+ retries: The default number of retries to allow for tool calls and output validation, before raising an error.
+ For model request retries, see the [HTTP Request Retries](../retries.md) documentation.
  output_retries: The maximum number of retries to allow for output validation, defaults to `retries`.
  tools: Tools to register with the agent, you can also register tools via the decorators
  [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain].
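The clarified docstring separates tool-call and output-validation retries from HTTP request retries. A minimal sketch of how these settings might be combined on an agent (the model name and tool are illustrative, not taken from this diff):

```python
import random

from pydantic_ai import Agent

agent = Agent(
    'openai:gpt-4o',  # placeholder model name
    retries=3,  # default retries for tool calls and output validation
    output_retries=5,  # overrides `retries` for output validation only
)


@agent.tool_plain(retries=1)  # per-tool override of the agent default
def roll_dice() -> str:
    """Roll a six-sided die and return the result."""
    return str(random.randint(1, 6))
```

HTTP-level request retries are configured separately on the provider's HTTP client, as referenced by the new docstring link.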
@@ -0,0 +1,205 @@
+ from __future__ import annotations as _annotations
+
+ from collections.abc import Iterable, Iterator, Mapping
+ from dataclasses import asdict, dataclass, field, fields, is_dataclass
+ from datetime import date
+ from typing import Any, Literal
+ from xml.etree import ElementTree
+
+ from pydantic import BaseModel
+
+ __all__ = ('format_as_xml',)
+
+ from pydantic.fields import ComputedFieldInfo, FieldInfo
+
+
+ def format_as_xml(
+ obj: Any,
+ root_tag: str | None = None,
+ item_tag: str = 'item',
+ none_str: str = 'null',
+ indent: str | None = ' ',
+ include_field_info: Literal['once'] | bool = False,
+ ) -> str:
+ """Format a Python object as XML.
+
+ This is useful since LLMs often find it easier to read semi-structured data (e.g. examples) as XML,
+ rather than JSON etc.
+
+ Supports: `str`, `bytes`, `bytearray`, `bool`, `int`, `float`, `date`, `datetime`, `Mapping`,
+ `Iterable`, `dataclass`, and `BaseModel`.
+
+ Args:
+ obj: Python Object to serialize to XML.
+ root_tag: Outer tag to wrap the XML in, use `None` to omit the outer tag.
+ item_tag: Tag to use for each item in an iterable (e.g. list), this is overridden by the class name
+ for dataclasses and Pydantic models.
+ none_str: String to use for `None` values.
+ indent: Indentation string to use for pretty printing.
+ include_field_info: Whether to include attributes like Pydantic `Field` attributes and dataclasses `field()`
+ `metadata` as XML attributes. In both cases the allowed `Field` attributes and `field()` metadata keys are
+ `title` and `description`. If a field is repeated in the data (e.g. in a list) by setting `once`
+ the attributes are included only in the first occurrence of an XML element relative to the same field.
+
+ Returns:
+ XML representation of the object.
+
+ Example:
+ ```python {title="format_as_xml_example.py" lint="skip"}
+ from pydantic_ai import format_as_xml
+
+ print(format_as_xml({'name': 'John', 'height': 6, 'weight': 200}, root_tag='user'))
+ '''
+ <user>
+ <name>John</name>
+ <height>6</height>
+ <weight>200</weight>
+ </user>
+ '''
+ ```
+ """
+ el = _ToXml(
+ data=obj,
+ item_tag=item_tag,
+ none_str=none_str,
+ include_field_info=include_field_info,
+ ).to_xml(root_tag)
+ if root_tag is None and el.text is None:
+ join = '' if indent is None else '\n'
+ return join.join(_rootless_xml_elements(el, indent))
+ else:
+ if indent is not None:
+ ElementTree.indent(el, space=indent)
+ return ElementTree.tostring(el, encoding='unicode')
+
+
+ @dataclass
+ class _ToXml:
+ data: Any
+ item_tag: str
+ none_str: str
+ include_field_info: Literal['once'] | bool
+ # a map of Pydantic and dataclasses Field paths to their metadata:
+ # a field unique string representation and its class
+ _fields_info: dict[str, tuple[str, FieldInfo | ComputedFieldInfo]] = field(default_factory=dict)
+ # keep track of fields we have extracted attributes from
+ _included_fields: set[str] = field(default_factory=set)
+ # keep track of class names for dataclasses and Pydantic models, that occur in lists
+ _element_names: dict[str, str] = field(default_factory=dict)
+ # flag for parsing dataclasses and Pydantic models once
+ _is_info_extracted: bool = False
+ _FIELD_ATTRIBUTES = ('title', 'description')
+
+ def to_xml(self, tag: str | None = None) -> ElementTree.Element:
+ return self._to_xml(value=self.data, path='', tag=tag)
+
+ def _to_xml(self, value: Any, path: str, tag: str | None = None) -> ElementTree.Element:
+ element = self._create_element(self.item_tag if tag is None else tag, path)
+ if value is None:
+ element.text = self.none_str
+ elif isinstance(value, str):
+ element.text = value
+ elif isinstance(value, bytes | bytearray):
+ element.text = value.decode(errors='ignore')
+ elif isinstance(value, bool | int | float):
+ element.text = str(value)
+ elif isinstance(value, date):
+ element.text = value.isoformat()
+ elif isinstance(value, Mapping):
+ if tag is None and path in self._element_names:
+ element.tag = self._element_names[path]
+ self._mapping_to_xml(element, value, path) # pyright: ignore[reportUnknownArgumentType]
+ elif is_dataclass(value) and not isinstance(value, type):
+ self._init_structure_info()
+ if tag is None:
+ element.tag = value.__class__.__name__
+ self._mapping_to_xml(element, asdict(value), path)
+ elif isinstance(value, BaseModel):
+ self._init_structure_info()
+ if tag is None:
+ element.tag = value.__class__.__name__
+ # by dumping the model we loose all metadata in nested data structures,
+ # but we have collected it when called _init_structure_info
+ self._mapping_to_xml(element, value.model_dump(), path)
+ elif isinstance(value, Iterable):
+ for n, item in enumerate(value): # pyright: ignore[reportUnknownVariableType,reportUnknownArgumentType]
+ element.append(self._to_xml(value=item, path=f'{path}.[{n}]' if path else f'[{n}]'))
+ else:
+ raise TypeError(f'Unsupported type for XML formatting: {type(value)}')
+ return element
+
+ def _create_element(self, tag: str, path: str) -> ElementTree.Element:
+ element = ElementTree.Element(tag)
+ if path in self._fields_info:
+ field_repr, field_info = self._fields_info[path]
+ if self.include_field_info and self.include_field_info != 'once' or field_repr not in self._included_fields:
+ field_attributes = self._extract_attributes(field_info)
+ for k, v in field_attributes.items():
+ element.set(k, v)
+ self._included_fields.add(field_repr)
+ return element
+
+ def _init_structure_info(self):
+ """Create maps with all data information (fields info and class names), if not already created."""
+ if not self._is_info_extracted:
+ self._parse_data_structures(self.data)
+ self._is_info_extracted = True
+
+ def _mapping_to_xml(
+ self,
+ element: ElementTree.Element,
+ mapping: Mapping[Any, Any],
+ path: str = '',
+ ) -> None:
+ for key, value in mapping.items():
+ if isinstance(key, int):
+ key = str(key)
+ elif not isinstance(key, str):
+ raise TypeError(f'Unsupported key type for XML formatting: {type(key)}, only str and int are allowed')
+ element.append(self._to_xml(value=value, path=f'{path}.{key}' if path else key, tag=key))
+
+ def _parse_data_structures(
+ self,
+ value: Any,
+ path: str = '',
+ ):
+ """Parse data structures as dataclasses or Pydantic models to extract element names and attributes."""
+ if value is None or isinstance(value, (str | int | float | date | bytearray | bytes | bool)):
+ return
+ elif isinstance(value, Mapping):
+ for k, v in value.items(): # pyright: ignore[reportUnknownVariableType]
+ self._parse_data_structures(v, f'{path}.{k}' if path else f'{k}')
+ elif is_dataclass(value) and not isinstance(value, type):
+ self._element_names[path] = value.__class__.__name__
+ for field in fields(value):
+ new_path = f'{path}.{field.name}' if path else field.name
+ if self.include_field_info and field.metadata:
+ attributes = {k: v for k, v in field.metadata.items() if k in self._FIELD_ATTRIBUTES}
+ if attributes:
+ field_repr = f'{value.__class__.__name__}.{field.name}'
+ self._fields_info[new_path] = (field_repr, FieldInfo(**attributes))
+ self._parse_data_structures(getattr(value, field.name), new_path)
+ elif isinstance(value, BaseModel):
+ self._element_names[path] = value.__class__.__name__
+ for model_fields in (value.__class__.model_fields, value.__class__.model_computed_fields):
+ for field, info in model_fields.items():
+ new_path = f'{path}.{field}' if path else field
+ if self.include_field_info and (isinstance(info, ComputedFieldInfo) or not info.exclude):
+ field_repr = f'{value.__class__.__name__}.{field}'
+ self._fields_info[new_path] = (field_repr, info)
+ self._parse_data_structures(getattr(value, field), new_path)
+ elif isinstance(value, Iterable):
+ for n, item in enumerate(value): # pyright: ignore[reportUnknownVariableType,reportUnknownArgumentType]
+ new_path = f'{path}.[{n}]' if path else f'[{n}]'
+ self._parse_data_structures(item, new_path)
+
+ @classmethod
+ def _extract_attributes(cls, info: FieldInfo | ComputedFieldInfo) -> dict[str, str]:
+ return {attr: str(value) for attr in cls._FIELD_ATTRIBUTES if (value := getattr(info, attr, None)) is not None}
+
+
+ def _rootless_xml_elements(root: ElementTree.Element, indent: str | None) -> Iterator[str]:
+ for sub_element in root:
+ if indent is not None:
+ ElementTree.indent(sub_element, space=indent)
+ yield ElementTree.tostring(sub_element, encoding='unicode')
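The new `include_field_info` option surfaces Pydantic `Field` and dataclass `field()` metadata (`title` and `description`) as XML attributes. A small usage sketch; the `User` model and its metadata are invented for illustration, and the output shape is inferred from the implementation above:

```python
from pydantic import BaseModel, Field

from pydantic_ai import format_as_xml


class User(BaseModel):
    """Illustrative model, not part of the package."""

    name: str = Field(description='Full name')
    height: int = Field(title='Height', description='Height in feet')


print(format_as_xml(User(name='John', height=6), root_tag='user', include_field_info=True))
# Expected shape, roughly:
# <user>
#   <name description="Full name">John</name>
#   <height title="Height" description="Height in feet">6</height>
# </user>
```

Passing `include_field_info='once'` attaches the attributes only to the first occurrence of each field, which keeps the XML smaller when the same model appears many times in a list.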
@@ -1161,11 +1161,7 @@ class ModelResponse:
  if settings.include_content and part.content is not None: # pragma: no branch
  from .models.instrumented import InstrumentedModel

- return_part['result'] = (
- part.content
- if isinstance(part.content, str)
- else {k: InstrumentedModel.serialize_any(v) for k, v in part.content.items()}
- )
+ return_part['result'] = InstrumentedModel.serialize_any(part.content)

  parts.append(return_part)
  return parts
@@ -51,6 +51,7 @@ from . import (
  try:
  from google.genai import Client
  from google.genai.types import (
+ BlobDict,
  CodeExecutionResult,
  CodeExecutionResultDict,
  ContentDict,
@@ -58,6 +59,7 @@ try:
  CountTokensConfigDict,
  ExecutableCode,
  ExecutableCodeDict,
+ FileDataDict,
  FinishReason as GoogleFinishReason,
  FunctionCallDict,
  FunctionCallingConfigDict,
@@ -79,6 +81,7 @@ try:
  ToolDict,
  ToolListUnionDict,
  UrlContextDict,
+ VideoMetadataDict,
  )

  from ..providers.google import GoogleProvider
@@ -525,17 +528,17 @@ class GoogleModel(Model):
  if isinstance(item, str):
  content.append({'text': item})
  elif isinstance(item, BinaryContent):
- # NOTE: The type from Google GenAI is incorrect, it should be `str`, not `bytes`.
- base64_encoded = base64.b64encode(item.data).decode('utf-8')
- inline_data_dict = {'inline_data': {'data': base64_encoded, 'mime_type': item.media_type}}
+ inline_data_dict: BlobDict = {'data': item.data, 'mime_type': item.media_type}
+ part_dict: PartDict = {'inline_data': inline_data_dict}
  if item.vendor_metadata:
- inline_data_dict['video_metadata'] = item.vendor_metadata
- content.append(inline_data_dict) # type: ignore
+ part_dict['video_metadata'] = cast(VideoMetadataDict, item.vendor_metadata)
+ content.append(part_dict)
  elif isinstance(item, VideoUrl) and item.is_youtube:
- file_data_dict = {'file_data': {'file_uri': item.url, 'mime_type': item.media_type}}
+ file_data_dict: FileDataDict = {'file_uri': item.url, 'mime_type': item.media_type}
+ part_dict: PartDict = {'file_data': file_data_dict}
  if item.vendor_metadata: # pragma: no branch
- file_data_dict['video_metadata'] = item.vendor_metadata
- content.append(file_data_dict) # type: ignore
+ part_dict['video_metadata'] = cast(VideoMetadataDict, item.vendor_metadata)
+ content.append(part_dict)
  elif isinstance(item, FileUrl):
  if item.force_download or (
  # google-gla does not support passing file urls directly, except for youtube videos
@@ -543,13 +546,15 @@ class GoogleModel(Model):
  self.system == 'google-gla'
  and not item.url.startswith(r'https://generativelanguage.googleapis.com/v1beta/files')
  ):
- downloaded_item = await download_item(item, data_format='base64')
- inline_data = {'data': downloaded_item['data'], 'mime_type': downloaded_item['data_type']}
- content.append({'inline_data': inline_data}) # type: ignore
+ downloaded_item = await download_item(item, data_format='bytes')
+ inline_data: BlobDict = {
+ 'data': downloaded_item['data'],
+ 'mime_type': downloaded_item['data_type'],
+ }
+ content.append({'inline_data': inline_data})
  else:
- content.append(
- {'file_data': {'file_uri': item.url, 'mime_type': item.media_type}}
- ) # pragma: lax no cover
+ file_data_dict: FileDataDict = {'file_uri': item.url, 'mime_type': item.media_type}
+ content.append({'file_data': file_data_dict}) # pragma: lax no cover
  else:
  assert_never(item)
  return content
@@ -578,7 +583,9 @@ class GeminiStreamedResponse(StreamedResponse):
  async for chunk in self._response:
  self._usage = _metadata_as_usage(chunk)

- assert chunk.candidates is not None
+ if not chunk.candidates:
+ continue # pragma: no cover
+
  candidate = chunk.candidates[0]

  if chunk.response_id: # pragma: no branch
@@ -610,7 +617,10 @@ class GeminiStreamedResponse(StreamedResponse):
  else: # pragma: no cover
  raise UnexpectedModelBehavior('Content field missing from streaming Gemini response', str(chunk))

- parts = candidate.content.parts or []
+ parts = candidate.content.parts
+ if not parts:
+ continue # pragma: no cover
+
  for part in parts:
  if part.thought_signature:
  signature = base64.b64encode(part.thought_signature).decode('utf-8')
@@ -822,7 +832,7 @@ def _metadata_as_usage(response: GenerateContentResponse) -> usage.RequestUsage:
  if not metadata_details:
  continue
  for detail in metadata_details:
- if not detail.modality or not detail.token_count: # pragma: no cover
+ if not detail.modality or not detail.token_count:
  continue
  details[f'{detail.modality.lower()}_{prefix}_tokens'] = detail.token_count
  if detail.modality != 'AUDIO':
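These `google.py` changes move to the typed `BlobDict` / `FileDataDict` payloads and pass raw bytes instead of base64-encoding them; the user-facing API is unchanged. A minimal sketch of sending binary content through an agent backed by a Google model (the model name, file path, and credentials setup are placeholders):

```python
from pathlib import Path

from pydantic_ai import Agent, BinaryContent

# Placeholder model name; requires the `google` extra and Google API credentials.
agent = Agent('google-gla:gemini-1.5-flash')

image_bytes = Path('photo.png').read_bytes()  # placeholder image file

result = agent.run_sync(
    [
        'Describe this image in one sentence.',
        BinaryContent(data=image_bytes, media_type='image/png'),
    ]
)
print(result.output)
```

Internally, the `BinaryContent` bytes now land in a `BlobDict` under `inline_data`, with any `vendor_metadata` forwarded as `video_metadata`.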
@@ -1,113 +0,0 @@
- from __future__ import annotations as _annotations
-
- from collections.abc import Iterable, Iterator, Mapping
- from dataclasses import asdict, dataclass, is_dataclass
- from datetime import date
- from typing import Any
- from xml.etree import ElementTree
-
- from pydantic import BaseModel
-
- __all__ = ('format_as_xml',)
-
-
- def format_as_xml(
- obj: Any,
- root_tag: str | None = None,
- item_tag: str = 'item',
- none_str: str = 'null',
- indent: str | None = ' ',
- ) -> str:
- """Format a Python object as XML.
-
- This is useful since LLMs often find it easier to read semi-structured data (e.g. examples) as XML,
- rather than JSON etc.
-
- Supports: `str`, `bytes`, `bytearray`, `bool`, `int`, `float`, `date`, `datetime`, `Mapping`,
- `Iterable`, `dataclass`, and `BaseModel`.
-
- Args:
- obj: Python Object to serialize to XML.
- root_tag: Outer tag to wrap the XML in, use `None` to omit the outer tag.
- item_tag: Tag to use for each item in an iterable (e.g. list), this is overridden by the class name
- for dataclasses and Pydantic models.
- none_str: String to use for `None` values.
- indent: Indentation string to use for pretty printing.
-
- Returns:
- XML representation of the object.
-
- Example:
- ```python {title="format_as_xml_example.py" lint="skip"}
- from pydantic_ai import format_as_xml
-
- print(format_as_xml({'name': 'John', 'height': 6, 'weight': 200}, root_tag='user'))
- '''
- <user>
- <name>John</name>
- <height>6</height>
- <weight>200</weight>
- </user>
- '''
- ```
- """
- el = _ToXml(item_tag=item_tag, none_str=none_str).to_xml(obj, root_tag)
- if root_tag is None and el.text is None:
- join = '' if indent is None else '\n'
- return join.join(_rootless_xml_elements(el, indent))
- else:
- if indent is not None:
- ElementTree.indent(el, space=indent)
- return ElementTree.tostring(el, encoding='unicode')
-
-
- @dataclass
- class _ToXml:
- item_tag: str
- none_str: str
-
- def to_xml(self, value: Any, tag: str | None) -> ElementTree.Element:
- element = ElementTree.Element(self.item_tag if tag is None else tag)
- if value is None:
- element.text = self.none_str
- elif isinstance(value, str):
- element.text = value
- elif isinstance(value, bytes | bytearray):
- element.text = value.decode(errors='ignore')
- elif isinstance(value, bool | int | float):
- element.text = str(value)
- elif isinstance(value, date):
- element.text = value.isoformat()
- elif isinstance(value, Mapping):
- self._mapping_to_xml(element, value) # pyright: ignore[reportUnknownArgumentType]
- elif is_dataclass(value) and not isinstance(value, type):
- if tag is None:
- element = ElementTree.Element(value.__class__.__name__)
- dc_dict = asdict(value)
- self._mapping_to_xml(element, dc_dict)
- elif isinstance(value, BaseModel):
- if tag is None:
- element = ElementTree.Element(value.__class__.__name__)
- self._mapping_to_xml(element, value.model_dump(mode='python'))
- elif isinstance(value, Iterable):
- for item in value: # pyright: ignore[reportUnknownVariableType]
- item_el = self.to_xml(item, None)
- element.append(item_el)
- else:
- raise TypeError(f'Unsupported type for XML formatting: {type(value)}')
- return element
-
- def _mapping_to_xml(self, element: ElementTree.Element, mapping: Mapping[Any, Any]) -> None:
- for key, value in mapping.items():
- if isinstance(key, int):
- key = str(key)
- elif not isinstance(key, str):
- raise TypeError(f'Unsupported key type for XML formatting: {type(key)}, only str and int are allowed')
- element.append(self.to_xml(value, key))
-
-
- def _rootless_xml_elements(root: ElementTree.Element, indent: str | None) -> Iterator[str]:
- for sub_element in root:
- if indent is not None:
- ElementTree.indent(sub_element, space=indent)
- yield ElementTree.tostring(sub_element, encoding='unicode')