pydantic-ai-slim 0.7.2__tar.gz → 0.7.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pydantic-ai-slim might be problematic; see the registry's advisory page for more details.

Files changed (117)
  1. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/PKG-INFO +10 -4
  2. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/__init__.py +2 -1
  3. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_agent_graph.py +2 -2
  4. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_cli.py +18 -3
  5. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_run_context.py +2 -2
  6. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/ag_ui.py +4 -4
  7. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/agent/__init__.py +7 -9
  8. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/agent/abstract.py +16 -18
  9. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/agent/wrapper.py +4 -6
  10. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/builtin_tools.py +9 -1
  11. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/direct.py +4 -4
  12. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_agent.py +13 -15
  13. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_model.py +2 -2
  14. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/messages.py +16 -6
  15. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/__init__.py +5 -5
  16. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/anthropic.py +27 -26
  17. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/bedrock.py +24 -26
  18. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/cohere.py +20 -25
  19. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/fallback.py +15 -15
  20. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/function.py +7 -9
  21. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/gemini.py +43 -39
  22. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/google.py +76 -50
  23. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/groq.py +22 -19
  24. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/huggingface.py +18 -21
  25. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/instrumented.py +4 -4
  26. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/mcp_sampling.py +1 -2
  27. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/mistral.py +24 -22
  28. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/openai.py +98 -44
  29. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/test.py +4 -5
  30. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/openai.py +13 -3
  31. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/openai.py +1 -1
  32. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/result.py +5 -5
  33. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/run.py +4 -11
  34. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/tools.py +5 -2
  35. pydantic_ai_slim-0.7.4/pydantic_ai/usage.py +315 -0
  36. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pyproject.toml +10 -3
  37. pydantic_ai_slim-0.7.2/pydantic_ai/usage.py +0 -153
  38. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/.gitignore +0 -0
  39. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/LICENSE +0 -0
  40. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/README.md +0 -0
  41. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/__main__.py +0 -0
  42. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_a2a.py +0 -0
  43. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_function_schema.py +0 -0
  44. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_griffe.py +0 -0
  45. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_mcp.py +0 -0
  46. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_output.py +0 -0
  47. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_parts_manager.py +0 -0
  48. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_system_prompt.py +0 -0
  49. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_thinking_part.py +0 -0
  50. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_tool_manager.py +0 -0
  51. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/_utils.py +0 -0
  52. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/common_tools/__init__.py +0 -0
  53. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  54. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/common_tools/tavily.py +0 -0
  55. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/__init__.py +0 -0
  56. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  57. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  58. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  59. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  60. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  61. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  62. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/exceptions.py +0 -0
  63. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/ext/__init__.py +0 -0
  64. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/ext/aci.py +0 -0
  65. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/ext/langchain.py +0 -0
  66. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/format_prompt.py +0 -0
  67. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/mcp.py +0 -0
  68. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/models/wrapper.py +0 -0
  69. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/output.py +0 -0
  70. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/__init__.py +0 -0
  71. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/_json_schema.py +0 -0
  72. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/amazon.py +0 -0
  73. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/anthropic.py +0 -0
  74. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/cohere.py +0 -0
  75. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/deepseek.py +0 -0
  76. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/google.py +0 -0
  77. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/grok.py +0 -0
  78. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/groq.py +0 -0
  79. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/meta.py +0 -0
  80. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/mistral.py +0 -0
  81. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/moonshotai.py +0 -0
  82. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/profiles/qwen.py +0 -0
  83. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/__init__.py +0 -0
  84. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/anthropic.py +0 -0
  85. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/azure.py +0 -0
  86. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/bedrock.py +0 -0
  87. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/cohere.py +0 -0
  88. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/deepseek.py +0 -0
  89. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/fireworks.py +0 -0
  90. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/github.py +0 -0
  91. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/google.py +0 -0
  92. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/google_gla.py +0 -0
  93. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/google_vertex.py +0 -0
  94. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/grok.py +0 -0
  95. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/groq.py +0 -0
  96. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/heroku.py +0 -0
  97. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/huggingface.py +0 -0
  98. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/mistral.py +0 -0
  99. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/moonshotai.py +0 -0
  100. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/ollama.py +0 -0
  101. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/openrouter.py +0 -0
  102. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/together.py +0 -0
  103. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/providers/vercel.py +0 -0
  104. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/py.typed +0 -0
  105. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/retries.py +0 -0
  106. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/settings.py +0 -0
  107. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/__init__.py +0 -0
  108. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/_dynamic.py +0 -0
  109. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/abstract.py +0 -0
  110. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/combined.py +0 -0
  111. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/deferred.py +0 -0
  112. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/filtered.py +0 -0
  113. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/function.py +0 -0
  114. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/prefixed.py +0 -0
  115. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/prepared.py +0 -0
  116. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/renamed.py +0 -0
  117. {pydantic_ai_slim-0.7.2 → pydantic_ai_slim-0.7.4}/pydantic_ai/toolsets/wrapper.py +0 -0
@@ -1,7 +1,11 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.7.2
3
+ Version: 0.7.4
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
+ Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
6
+ Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
7
+ Project-URL: Documentation, https://ai.pydantic.dev/install/#slim-install
8
+ Project-URL: Changelog, https://github.com/pydantic/pydantic-ai/releases
5
9
  Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
6
10
  License-Expression: MIT
7
11
  License-File: LICENSE
@@ -27,10 +31,11 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
27
31
  Requires-Python: >=3.9
28
32
  Requires-Dist: eval-type-backport>=0.2.0
29
33
  Requires-Dist: exceptiongroup; python_version < '3.11'
34
+ Requires-Dist: genai-prices>=0.0.22
30
35
  Requires-Dist: griffe>=1.3.2
31
36
  Requires-Dist: httpx>=0.27
32
37
  Requires-Dist: opentelemetry-api>=1.28.0
33
- Requires-Dist: pydantic-graph==0.7.2
38
+ Requires-Dist: pydantic-graph==0.7.4
34
39
  Requires-Dist: pydantic>=2.10
35
40
  Requires-Dist: typing-inspection>=0.4.0
36
41
  Provides-Extra: a2a
@@ -45,13 +50,14 @@ Requires-Dist: boto3>=1.39.0; extra == 'bedrock'
45
50
  Provides-Extra: cli
46
51
  Requires-Dist: argcomplete>=3.5.0; extra == 'cli'
47
52
  Requires-Dist: prompt-toolkit>=3; extra == 'cli'
53
+ Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
48
54
  Requires-Dist: rich>=13; extra == 'cli'
49
55
  Provides-Extra: cohere
50
56
  Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'cohere'
51
57
  Provides-Extra: duckduckgo
52
58
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
53
59
  Provides-Extra: evals
54
- Requires-Dist: pydantic-evals==0.7.2; extra == 'evals'
60
+ Requires-Dist: pydantic-evals==0.7.4; extra == 'evals'
55
61
  Provides-Extra: google
56
62
  Requires-Dist: google-genai>=1.28.0; extra == 'google'
57
63
  Provides-Extra: groq
@@ -71,7 +77,7 @@ Requires-Dist: tenacity>=8.2.3; extra == 'retries'
71
77
  Provides-Extra: tavily
72
78
  Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
73
79
  Provides-Extra: temporal
74
- Requires-Dist: temporalio>=1.15.0; extra == 'temporal'
80
+ Requires-Dist: temporalio==1.15.0; extra == 'temporal'
75
81
  Provides-Extra: vertexai
76
82
  Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
77
83
  Requires-Dist: requests>=2.32.2; extra == 'vertexai'
@@ -1,7 +1,7 @@
1
1
  from importlib.metadata import version as _metadata_version
2
2
 
3
3
  from .agent import Agent, CallToolsNode, EndStrategy, ModelRequestNode, UserPromptNode, capture_run_messages
4
- from .builtin_tools import CodeExecutionTool, WebSearchTool, WebSearchUserLocation
4
+ from .builtin_tools import CodeExecutionTool, UrlContextTool, WebSearchTool, WebSearchUserLocation
5
5
  from .exceptions import (
6
6
  AgentRunError,
7
7
  FallbackExceptionGroup,
@@ -45,6 +45,7 @@ __all__ = (
45
45
  # builtin_tools
46
46
  'WebSearchTool',
47
47
  'WebSearchUserLocation',
48
+ 'UrlContextTool',
48
49
  'CodeExecutionTool',
49
50
  # output
50
51
  'ToolOutput',
@@ -76,7 +76,7 @@ class GraphAgentState:
76
76
  """State kept across the execution of the agent graph."""
77
77
 
78
78
  message_history: list[_messages.ModelMessage]
79
- usage: _usage.Usage
79
+ usage: _usage.RunUsage
80
80
  retries: int
81
81
  run_step: int
82
82
 
@@ -337,7 +337,7 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
337
337
 
338
338
  model_settings, model_request_parameters, message_history, _ = await self._prepare_request(ctx)
339
339
  model_response = await ctx.deps.model.request(message_history, model_settings, model_request_parameters)
340
- ctx.state.usage.incr(_usage.Usage())
340
+ ctx.state.usage.requests += 1
341
341
 
342
342
  return self._finish_handling(ctx, model_response)
343
343
 
@@ -18,12 +18,13 @@ from . import __version__
18
18
  from ._run_context import AgentDepsT
19
19
  from .agent import AbstractAgent, Agent
20
20
  from .exceptions import UserError
21
- from .messages import ModelMessage
21
+ from .messages import ModelMessage, TextPart
22
22
  from .models import KnownModelName, infer_model
23
23
  from .output import OutputDataT
24
24
 
25
25
  try:
26
26
  import argcomplete
27
+ import pyperclip
27
28
  from prompt_toolkit import PromptSession
28
29
  from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
29
30
  from prompt_toolkit.buffer import Buffer
@@ -38,7 +39,7 @@ try:
38
39
  from rich.text import Text
39
40
  except ImportError as _import_error:
40
41
  raise ImportError(
41
- 'Please install `rich`, `prompt-toolkit` and `argcomplete` to use the Pydantic AI CLI, '
42
+ 'Please install `rich`, `prompt-toolkit`, `pyperclip` and `argcomplete` to use the Pydantic AI CLI, '
42
43
  'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
43
44
  ) from _import_error
44
45
 
@@ -114,6 +115,7 @@ Special prompts:
114
115
  * `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
115
116
  * `/markdown` - show the last markdown output of the last question
116
117
  * `/multiline` - toggle multiline mode
118
+ * `/cp` - copy the last response to clipboard
117
119
  """,
118
120
  formatter_class=argparse.RawTextHelpFormatter,
119
121
  )
@@ -237,7 +239,7 @@ async def run_chat(
237
239
 
238
240
  while True:
239
241
  try:
240
- auto_suggest = CustomAutoSuggest(['/markdown', '/multiline', '/exit'])
242
+ auto_suggest = CustomAutoSuggest(['/markdown', '/multiline', '/exit', '/cp'])
241
243
  text = await session.prompt_async(f'{prog_name} ➤ ', auto_suggest=auto_suggest, multiline=multiline)
242
244
  except (KeyboardInterrupt, EOFError): # pragma: no cover
243
245
  return 0
@@ -347,6 +349,19 @@ def handle_slash_command(
347
349
  elif ident_prompt == '/exit':
348
350
  console.print('[dim]Exiting…[/dim]')
349
351
  return 0, multiline
352
+ elif ident_prompt == '/cp':
353
+ try:
354
+ parts = messages[-1].parts
355
+ except IndexError:
356
+ console.print('[dim]No output available to copy.[/dim]')
357
+ else:
358
+ text_to_copy = '\n\n'.join(part.content for part in parts if isinstance(part, TextPart))
359
+ text_to_copy = text_to_copy.strip()
360
+ if text_to_copy:
361
+ pyperclip.copy(text_to_copy)
362
+ console.print('[dim]Copied last output to clipboard.[/dim]')
363
+ else:
364
+ console.print('[dim]No text content to copy.[/dim]')
350
365
  else:
351
366
  console.print(f'[red]Unknown command[/red] [magenta]`{ident_prompt}`[/magenta]')
352
367
  return None, multiline
@@ -12,7 +12,7 @@ from . import _utils, messages as _messages
12
12
 
13
13
  if TYPE_CHECKING:
14
14
  from .models import Model
15
- from .result import Usage
15
+ from .result import RunUsage
16
16
 
17
17
  AgentDepsT = TypeVar('AgentDepsT', default=None, contravariant=True)
18
18
  """Type variable for agent dependencies."""
@@ -26,7 +26,7 @@ class RunContext(Generic[AgentDepsT]):
26
26
  """Dependencies for the agent."""
27
27
  model: Model
28
28
  """The model used in this run."""
29
- usage: Usage
29
+ usage: RunUsage
30
30
  """LLM usage associated with the run."""
31
31
  prompt: str | Sequence[_messages.UserContent] | None = None
32
32
  """The original user prompt passed to the run."""
@@ -51,7 +51,7 @@ from .settings import ModelSettings
51
51
  from .tools import AgentDepsT, ToolDefinition
52
52
  from .toolsets import AbstractToolset
53
53
  from .toolsets.deferred import DeferredToolset
54
- from .usage import Usage, UsageLimits
54
+ from .usage import RunUsage, UsageLimits
55
55
 
56
56
  try:
57
57
  from ag_ui.core import (
@@ -127,7 +127,7 @@ class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
127
127
  deps: AgentDepsT = None,
128
128
  model_settings: ModelSettings | None = None,
129
129
  usage_limits: UsageLimits | None = None,
130
- usage: Usage | None = None,
130
+ usage: RunUsage | None = None,
131
131
  infer_name: bool = True,
132
132
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
133
133
  # Starlette parameters.
@@ -216,7 +216,7 @@ async def handle_ag_ui_request(
216
216
  deps: AgentDepsT = None,
217
217
  model_settings: ModelSettings | None = None,
218
218
  usage_limits: UsageLimits | None = None,
219
- usage: Usage | None = None,
219
+ usage: RunUsage | None = None,
220
220
  infer_name: bool = True,
221
221
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
222
222
  ) -> Response:
@@ -277,7 +277,7 @@ async def run_ag_ui(
277
277
  deps: AgentDepsT = None,
278
278
  model_settings: ModelSettings | None = None,
279
279
  usage_limits: UsageLimits | None = None,
280
- usage: Usage | None = None,
280
+ usage: RunUsage | None = None,
281
281
  infer_name: bool = True,
282
282
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
283
283
  ) -> AsyncIterator[str]:
@@ -243,7 +243,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
243
243
  """Create an agent.
244
244
 
245
245
  Args:
246
- model: The default model to use for this agent, if not provide,
246
+ model: The default model to use for this agent, if not provided,
247
247
  you must provide the model when calling it. We allow `str` here since the actual list of allowed models changes frequently.
248
248
  output_type: The type of the output data, used to validate the data returned by the model,
249
249
  defaults to `str`.
@@ -431,7 +431,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
431
431
  deps: AgentDepsT = None,
432
432
  model_settings: ModelSettings | None = None,
433
433
  usage_limits: _usage.UsageLimits | None = None,
434
- usage: _usage.Usage | None = None,
434
+ usage: _usage.RunUsage | None = None,
435
435
  infer_name: bool = True,
436
436
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
437
437
  ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, OutputDataT]]: ...
@@ -447,7 +447,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
447
447
  deps: AgentDepsT = None,
448
448
  model_settings: ModelSettings | None = None,
449
449
  usage_limits: _usage.UsageLimits | None = None,
450
- usage: _usage.Usage | None = None,
450
+ usage: _usage.RunUsage | None = None,
451
451
  infer_name: bool = True,
452
452
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
453
453
  ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, RunOutputDataT]]: ...
@@ -463,7 +463,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
463
463
  deps: AgentDepsT = None,
464
464
  model_settings: ModelSettings | None = None,
465
465
  usage_limits: _usage.UsageLimits | None = None,
466
- usage: _usage.Usage | None = None,
466
+ usage: _usage.RunUsage | None = None,
467
467
  infer_name: bool = True,
468
468
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
469
469
  ) -> AsyncIterator[AgentRun[AgentDepsT, Any]]:
@@ -514,9 +514,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
514
514
  CallToolsNode(
515
515
  model_response=ModelResponse(
516
516
  parts=[TextPart(content='The capital of France is Paris.')],
517
- usage=Usage(
518
- requests=1, request_tokens=56, response_tokens=7, total_tokens=63
519
- ),
517
+ usage=RequestUsage(input_tokens=56, output_tokens=7),
520
518
  model_name='gpt-4o',
521
519
  timestamp=datetime.datetime(...),
522
520
  )
@@ -575,7 +573,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
575
573
  )
576
574
 
577
575
  # Build the initial state
578
- usage = usage or _usage.Usage()
576
+ usage = usage or _usage.RunUsage()
579
577
  state = _agent_graph.GraphAgentState(
580
578
  message_history=message_history[:] if message_history else [],
581
579
  usage=usage,
@@ -677,7 +675,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
677
675
  run_span.end()
678
676
 
679
677
  def _run_span_end_attributes(
680
- self, state: _agent_graph.GraphAgentState, usage: _usage.Usage, settings: InstrumentationSettings
678
+ self, state: _agent_graph.GraphAgentState, usage: _usage.RunUsage, settings: InstrumentationSettings
681
679
  ):
682
680
  return {
683
681
  **usage.opentelemetry_attributes(),
@@ -32,7 +32,7 @@ from ..tools import (
32
32
  ToolFuncEither,
33
33
  )
34
34
  from ..toolsets import AbstractToolset
35
- from ..usage import Usage, UsageLimits
35
+ from ..usage import RunUsage, UsageLimits
36
36
 
37
37
  # Re-exporting like this improves auto-import behavior in PyCharm
38
38
  capture_run_messages = _agent_graph.capture_run_messages
@@ -131,7 +131,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
131
131
  deps: AgentDepsT = None,
132
132
  model_settings: ModelSettings | None = None,
133
133
  usage_limits: _usage.UsageLimits | None = None,
134
- usage: _usage.Usage | None = None,
134
+ usage: _usage.RunUsage | None = None,
135
135
  infer_name: bool = True,
136
136
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
137
137
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -148,7 +148,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
148
148
  deps: AgentDepsT = None,
149
149
  model_settings: ModelSettings | None = None,
150
150
  usage_limits: _usage.UsageLimits | None = None,
151
- usage: _usage.Usage | None = None,
151
+ usage: _usage.RunUsage | None = None,
152
152
  infer_name: bool = True,
153
153
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
154
154
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -164,7 +164,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
164
164
  deps: AgentDepsT = None,
165
165
  model_settings: ModelSettings | None = None,
166
166
  usage_limits: _usage.UsageLimits | None = None,
167
- usage: _usage.Usage | None = None,
167
+ usage: _usage.RunUsage | None = None,
168
168
  infer_name: bool = True,
169
169
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
170
170
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -240,7 +240,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
240
240
  deps: AgentDepsT = None,
241
241
  model_settings: ModelSettings | None = None,
242
242
  usage_limits: _usage.UsageLimits | None = None,
243
- usage: _usage.Usage | None = None,
243
+ usage: _usage.RunUsage | None = None,
244
244
  infer_name: bool = True,
245
245
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
246
246
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -257,7 +257,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
257
257
  deps: AgentDepsT = None,
258
258
  model_settings: ModelSettings | None = None,
259
259
  usage_limits: _usage.UsageLimits | None = None,
260
- usage: _usage.Usage | None = None,
260
+ usage: _usage.RunUsage | None = None,
261
261
  infer_name: bool = True,
262
262
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
263
263
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -273,7 +273,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
273
273
  deps: AgentDepsT = None,
274
274
  model_settings: ModelSettings | None = None,
275
275
  usage_limits: _usage.UsageLimits | None = None,
276
- usage: _usage.Usage | None = None,
276
+ usage: _usage.RunUsage | None = None,
277
277
  infer_name: bool = True,
278
278
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
279
279
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -341,7 +341,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
341
341
  deps: AgentDepsT = None,
342
342
  model_settings: ModelSettings | None = None,
343
343
  usage_limits: _usage.UsageLimits | None = None,
344
- usage: _usage.Usage | None = None,
344
+ usage: _usage.RunUsage | None = None,
345
345
  infer_name: bool = True,
346
346
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
347
347
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -358,7 +358,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
358
358
  deps: AgentDepsT = None,
359
359
  model_settings: ModelSettings | None = None,
360
360
  usage_limits: _usage.UsageLimits | None = None,
361
- usage: _usage.Usage | None = None,
361
+ usage: _usage.RunUsage | None = None,
362
362
  infer_name: bool = True,
363
363
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
364
364
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -375,7 +375,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
375
375
  deps: AgentDepsT = None,
376
376
  model_settings: ModelSettings | None = None,
377
377
  usage_limits: _usage.UsageLimits | None = None,
378
- usage: _usage.Usage | None = None,
378
+ usage: _usage.RunUsage | None = None,
379
379
  infer_name: bool = True,
380
380
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
381
381
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -534,7 +534,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
534
534
  deps: AgentDepsT = None,
535
535
  model_settings: ModelSettings | None = None,
536
536
  usage_limits: _usage.UsageLimits | None = None,
537
- usage: _usage.Usage | None = None,
537
+ usage: _usage.RunUsage | None = None,
538
538
  infer_name: bool = True,
539
539
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
540
540
  ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, OutputDataT]]: ...
@@ -550,7 +550,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
550
550
  deps: AgentDepsT = None,
551
551
  model_settings: ModelSettings | None = None,
552
552
  usage_limits: _usage.UsageLimits | None = None,
553
- usage: _usage.Usage | None = None,
553
+ usage: _usage.RunUsage | None = None,
554
554
  infer_name: bool = True,
555
555
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
556
556
  ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, RunOutputDataT]]: ...
@@ -567,7 +567,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
567
567
  deps: AgentDepsT = None,
568
568
  model_settings: ModelSettings | None = None,
569
569
  usage_limits: _usage.UsageLimits | None = None,
570
- usage: _usage.Usage | None = None,
570
+ usage: _usage.RunUsage | None = None,
571
571
  infer_name: bool = True,
572
572
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
573
573
  ) -> AsyncIterator[AgentRun[AgentDepsT, Any]]:
@@ -618,9 +618,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
618
618
  CallToolsNode(
619
619
  model_response=ModelResponse(
620
620
  parts=[TextPart(content='The capital of France is Paris.')],
621
- usage=Usage(
622
- requests=1, request_tokens=56, response_tokens=7, total_tokens=63
623
- ),
621
+ usage=RequestUsage(input_tokens=56, output_tokens=7),
624
622
  model_name='gpt-4o',
625
623
  timestamp=datetime.datetime(...),
626
624
  )
@@ -678,7 +676,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
678
676
  def _infer_name(self, function_frame: FrameType | None) -> None:
679
677
  """Infer the agent name from the call frame.
680
678
 
681
- Usage should be `self._infer_name(inspect.currentframe())`.
679
+ RunUsage should be `self._infer_name(inspect.currentframe())`.
682
680
  """
683
681
  assert self.name is None, 'Name already set'
684
682
  if function_frame is not None: # pragma: no branch
@@ -751,7 +749,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
751
749
  deps: AgentDepsT = None,
752
750
  model_settings: ModelSettings | None = None,
753
751
  usage_limits: UsageLimits | None = None,
754
- usage: Usage | None = None,
752
+ usage: RunUsage | None = None,
755
753
  infer_name: bool = True,
756
754
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
757
755
  # Starlette
@@ -76,7 +76,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
76
76
  deps: AgentDepsT = None,
77
77
  model_settings: ModelSettings | None = None,
78
78
  usage_limits: _usage.UsageLimits | None = None,
79
- usage: _usage.Usage | None = None,
79
+ usage: _usage.RunUsage | None = None,
80
80
  infer_name: bool = True,
81
81
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
82
82
  ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, OutputDataT]]: ...
@@ -92,7 +92,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
92
92
  deps: AgentDepsT = None,
93
93
  model_settings: ModelSettings | None = None,
94
94
  usage_limits: _usage.UsageLimits | None = None,
95
- usage: _usage.Usage | None = None,
95
+ usage: _usage.RunUsage | None = None,
96
96
  infer_name: bool = True,
97
97
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
98
98
  ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, RunOutputDataT]]: ...
@@ -108,7 +108,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
108
108
  deps: AgentDepsT = None,
109
109
  model_settings: ModelSettings | None = None,
110
110
  usage_limits: _usage.UsageLimits | None = None,
111
- usage: _usage.Usage | None = None,
111
+ usage: _usage.RunUsage | None = None,
112
112
  infer_name: bool = True,
113
113
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
114
114
  ) -> AsyncIterator[AgentRun[AgentDepsT, Any]]:
@@ -159,9 +159,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
159
159
  CallToolsNode(
160
160
  model_response=ModelResponse(
161
161
  parts=[TextPart(content='The capital of France is Paris.')],
162
- usage=Usage(
163
- requests=1, request_tokens=56, response_tokens=7, total_tokens=63
164
- ),
162
+ usage=RequestUsage(input_tokens=56, output_tokens=7),
165
163
  model_name='gpt-4o',
166
164
  timestamp=datetime.datetime(...),
167
165
  )
@@ -6,7 +6,7 @@ from typing import Literal
6
6
 
7
7
  from typing_extensions import TypedDict
8
8
 
9
- __all__ = ('AbstractBuiltinTool', 'WebSearchTool', 'WebSearchUserLocation', 'CodeExecutionTool')
9
+ __all__ = ('AbstractBuiltinTool', 'WebSearchTool', 'WebSearchUserLocation', 'CodeExecutionTool', 'UrlContextTool')
10
10
 
11
11
 
12
12
  @dataclass
@@ -103,3 +103,11 @@ class CodeExecutionTool(AbstractBuiltinTool):
103
103
  * OpenAI
104
104
  * Google
105
105
  """
106
+
107
+
108
+ class UrlContextTool(AbstractBuiltinTool):
109
+ """Allows your agent to access contents from URLs.
110
+
111
+ Supported by:
112
+ * Google
113
+ """
@@ -16,7 +16,7 @@ from dataclasses import dataclass, field
16
16
  from datetime import datetime
17
17
  from types import TracebackType
18
18
 
19
- from pydantic_ai.usage import Usage
19
+ from pydantic_ai.usage import RequestUsage
20
20
  from pydantic_graph._utils import get_event_loop as _get_event_loop
21
21
 
22
22
  from . import agent, messages, models, settings
@@ -57,7 +57,7 @@ async def model_request(
57
57
  '''
58
58
  ModelResponse(
59
59
  parts=[TextPart(content='The capital of France is Paris.')],
60
- usage=Usage(requests=1, request_tokens=56, response_tokens=7, total_tokens=63),
60
+ usage=RequestUsage(input_tokens=56, output_tokens=7),
61
61
  model_name='claude-3-5-haiku-latest',
62
62
  timestamp=datetime.datetime(...),
63
63
  )
@@ -110,7 +110,7 @@ def model_request_sync(
110
110
  '''
111
111
  ModelResponse(
112
112
  parts=[TextPart(content='The capital of France is Paris.')],
113
- usage=Usage(requests=1, request_tokens=56, response_tokens=7, total_tokens=63),
113
+ usage=RequestUsage(input_tokens=56, output_tokens=7),
114
114
  model_name='claude-3-5-haiku-latest',
115
115
  timestamp=datetime.datetime(...),
116
116
  )
@@ -366,7 +366,7 @@ class StreamedResponseSync:
366
366
  """Build a ModelResponse from the data received from the stream so far."""
367
367
  return self._ensure_stream_ready().get()
368
368
 
369
- def usage(self) -> Usage:
369
+ def usage(self) -> RequestUsage:
370
370
  """Get the usage of the response so far."""
371
371
  return self._ensure_stream_ready().usage()
372
372
 
@@ -200,7 +200,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
200
200
  deps: AgentDepsT = None,
201
201
  model_settings: ModelSettings | None = None,
202
202
  usage_limits: _usage.UsageLimits | None = None,
203
- usage: _usage.Usage | None = None,
203
+ usage: _usage.RunUsage | None = None,
204
204
  infer_name: bool = True,
205
205
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
206
206
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -217,7 +217,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
217
217
  deps: AgentDepsT = None,
218
218
  model_settings: ModelSettings | None = None,
219
219
  usage_limits: _usage.UsageLimits | None = None,
220
- usage: _usage.Usage | None = None,
220
+ usage: _usage.RunUsage | None = None,
221
221
  infer_name: bool = True,
222
222
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
223
223
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -233,7 +233,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
233
233
  deps: AgentDepsT = None,
234
234
  model_settings: ModelSettings | None = None,
235
235
  usage_limits: _usage.UsageLimits | None = None,
236
- usage: _usage.Usage | None = None,
236
+ usage: _usage.RunUsage | None = None,
237
237
  infer_name: bool = True,
238
238
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
239
239
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -305,7 +305,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
305
305
  deps: AgentDepsT = None,
306
306
  model_settings: ModelSettings | None = None,
307
307
  usage_limits: _usage.UsageLimits | None = None,
308
- usage: _usage.Usage | None = None,
308
+ usage: _usage.RunUsage | None = None,
309
309
  infer_name: bool = True,
310
310
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
311
311
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -322,7 +322,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
322
322
  deps: AgentDepsT = None,
323
323
  model_settings: ModelSettings | None = None,
324
324
  usage_limits: _usage.UsageLimits | None = None,
325
- usage: _usage.Usage | None = None,
325
+ usage: _usage.RunUsage | None = None,
326
326
  infer_name: bool = True,
327
327
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
328
328
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -338,7 +338,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
338
338
  deps: AgentDepsT = None,
339
339
  model_settings: ModelSettings | None = None,
340
340
  usage_limits: _usage.UsageLimits | None = None,
341
- usage: _usage.Usage | None = None,
341
+ usage: _usage.RunUsage | None = None,
342
342
  infer_name: bool = True,
343
343
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
344
344
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -408,7 +408,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
408
408
  deps: AgentDepsT = None,
409
409
  model_settings: ModelSettings | None = None,
410
410
  usage_limits: _usage.UsageLimits | None = None,
411
- usage: _usage.Usage | None = None,
411
+ usage: _usage.RunUsage | None = None,
412
412
  infer_name: bool = True,
413
413
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
414
414
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -425,7 +425,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
425
425
  deps: AgentDepsT = None,
426
426
  model_settings: ModelSettings | None = None,
427
427
  usage_limits: _usage.UsageLimits | None = None,
428
- usage: _usage.Usage | None = None,
428
+ usage: _usage.RunUsage | None = None,
429
429
  infer_name: bool = True,
430
430
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
431
431
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -442,7 +442,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
442
442
  deps: AgentDepsT = None,
443
443
  model_settings: ModelSettings | None = None,
444
444
  usage_limits: _usage.UsageLimits | None = None,
445
- usage: _usage.Usage | None = None,
445
+ usage: _usage.RunUsage | None = None,
446
446
  infer_name: bool = True,
447
447
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
448
448
  event_stream_handler: EventStreamHandler[AgentDepsT] | None = None,
@@ -513,7 +513,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
513
513
  deps: AgentDepsT = None,
514
514
  model_settings: ModelSettings | None = None,
515
515
  usage_limits: _usage.UsageLimits | None = None,
516
- usage: _usage.Usage | None = None,
516
+ usage: _usage.RunUsage | None = None,
517
517
  infer_name: bool = True,
518
518
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
519
519
  **_deprecated_kwargs: Never,
@@ -530,7 +530,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
530
530
  deps: AgentDepsT = None,
531
531
  model_settings: ModelSettings | None = None,
532
532
  usage_limits: _usage.UsageLimits | None = None,
533
- usage: _usage.Usage | None = None,
533
+ usage: _usage.RunUsage | None = None,
534
534
  infer_name: bool = True,
535
535
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
536
536
  **_deprecated_kwargs: Never,
@@ -547,7 +547,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
547
547
  deps: AgentDepsT = None,
548
548
  model_settings: ModelSettings | None = None,
549
549
  usage_limits: _usage.UsageLimits | None = None,
550
- usage: _usage.Usage | None = None,
550
+ usage: _usage.RunUsage | None = None,
551
551
  infer_name: bool = True,
552
552
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
553
553
  **_deprecated_kwargs: Never,
@@ -599,9 +599,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
599
599
  CallToolsNode(
600
600
  model_response=ModelResponse(
601
601
  parts=[TextPart(content='The capital of France is Paris.')],
602
- usage=Usage(
603
- requests=1, request_tokens=56, response_tokens=7, total_tokens=63
604
- ),
602
+ usage=RequestUsage(input_tokens=56, output_tokens=7),
605
603
  model_name='gpt-4o',
606
604
  timestamp=datetime.datetime(...),
607
605
  )
@@ -21,7 +21,7 @@ from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
21
21
  from pydantic_ai.models.wrapper import WrapperModel
22
22
  from pydantic_ai.settings import ModelSettings
23
23
  from pydantic_ai.tools import AgentDepsT, RunContext
24
- from pydantic_ai.usage import Usage
24
+ from pydantic_ai.usage import RequestUsage
25
25
 
26
26
  from ._run_context import TemporalRunContext
27
27
 
@@ -48,7 +48,7 @@ class TemporalStreamedResponse(StreamedResponse):
48
48
  def get(self) -> ModelResponse:
49
49
  return self.response
50
50
 
51
- def usage(self) -> Usage:
51
+ def usage(self) -> RequestUsage:
52
52
  return self.response.usage # pragma: no cover
53
53
 
54
54
  @property