openai-agents 0.0.13__tar.gz → 0.0.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openai-agents might be problematic.

Files changed (349)
  1. {openai_agents-0.0.13 → openai_agents-0.0.15}/PKG-INFO +3 -3
  2. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/tracing.md +1 -0
  3. openai_agents-0.0.15/examples/mcp/streamablehttp_example/README.md +13 -0
  4. openai_agents-0.0.15/examples/mcp/streamablehttp_example/main.py +83 -0
  5. openai_agents-0.0.15/examples/mcp/streamablehttp_example/server.py +33 -0
  6. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/agents/search_agent.py +1 -1
  7. {openai_agents-0.0.13 → openai_agents-0.0.15}/pyproject.toml +3 -3
  8. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/models/litellm_model.py +2 -0
  9. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/mcp/__init__.py +4 -0
  10. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/mcp/server.py +98 -8
  11. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/chatcmpl_converter.py +1 -1
  12. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/result.py +7 -9
  13. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/run.py +3 -0
  14. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/__init__.py +2 -0
  15. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/model.py +3 -4
  16. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/fake_model.py +16 -3
  17. openai_agents-0.0.15/tests/models/test_litellm_extra_body.py +45 -0
  18. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_result_cast.py +2 -1
  19. {openai_agents-0.0.13 → openai_agents-0.0.15}/uv.lock +1700 -1693
  20. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  21. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
  22. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
  23. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/ISSUE_TEMPLATE/question.md +0 -0
  24. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
  25. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/workflows/docs.yml +0 -0
  26. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/workflows/issues.yml +0 -0
  27. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/workflows/publish.yml +0 -0
  28. {openai_agents-0.0.13 → openai_agents-0.0.15}/.github/workflows/tests.yml +0 -0
  29. {openai_agents-0.0.13 → openai_agents-0.0.15}/.gitignore +0 -0
  30. {openai_agents-0.0.13 → openai_agents-0.0.15}/.prettierrc +0 -0
  31. {openai_agents-0.0.13 → openai_agents-0.0.15}/.vscode/settings.json +0 -0
  32. {openai_agents-0.0.13 → openai_agents-0.0.15}/LICENSE +0 -0
  33. {openai_agents-0.0.13 → openai_agents-0.0.15}/Makefile +0 -0
  34. {openai_agents-0.0.13 → openai_agents-0.0.15}/README.md +0 -0
  35. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/agents.md +0 -0
  36. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/assets/images/favicon-platform.svg +0 -0
  37. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/assets/images/graph.png +0 -0
  38. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/assets/images/mcp-tracing.jpg +0 -0
  39. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/assets/images/orchestration.png +0 -0
  40. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/assets/logo.svg +0 -0
  41. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/config.md +0 -0
  42. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/context.md +0 -0
  43. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/examples.md +0 -0
  44. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/guardrails.md +0 -0
  45. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/handoffs.md +0 -0
  46. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/index.md +0 -0
  47. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/agents.md +0 -0
  48. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/config.md +0 -0
  49. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/context.md +0 -0
  50. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/examples.md +0 -0
  51. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/guardrails.md +0 -0
  52. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/handoffs.md +0 -0
  53. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/index.md +0 -0
  54. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/mcp.md +0 -0
  55. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/models/index.md +0 -0
  56. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/models/litellm.md +0 -0
  57. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/models.md +0 -0
  58. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/multi_agent.md +0 -0
  59. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/quickstart.md +0 -0
  60. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/results.md +0 -0
  61. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/running_agents.md +0 -0
  62. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/streaming.md +0 -0
  63. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/tools.md +0 -0
  64. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/tracing.md +0 -0
  65. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/visualization.md +0 -0
  66. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/voice/pipeline.md +0 -0
  67. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/voice/quickstart.md +0 -0
  68. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ja/voice/tracing.md +0 -0
  69. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/mcp.md +0 -0
  70. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/models/index.md +0 -0
  71. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/models/litellm.md +0 -0
  72. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/multi_agent.md +0 -0
  73. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/quickstart.md +0 -0
  74. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/agent.md +0 -0
  75. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/agent_output.md +0 -0
  76. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/exceptions.md +0 -0
  77. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/extensions/handoff_filters.md +0 -0
  78. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/extensions/handoff_prompt.md +0 -0
  79. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/extensions/litellm.md +0 -0
  80. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/function_schema.md +0 -0
  81. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/guardrail.md +0 -0
  82. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/handoffs.md +0 -0
  83. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/index.md +0 -0
  84. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/items.md +0 -0
  85. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/lifecycle.md +0 -0
  86. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/mcp/server.md +0 -0
  87. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/mcp/util.md +0 -0
  88. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/model_settings.md +0 -0
  89. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/models/interface.md +0 -0
  90. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/models/openai_chatcompletions.md +0 -0
  91. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/models/openai_responses.md +0 -0
  92. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/result.md +0 -0
  93. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/run.md +0 -0
  94. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/run_context.md +0 -0
  95. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/stream_events.md +0 -0
  96. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tool.md +0 -0
  97. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/create.md +0 -0
  98. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/index.md +0 -0
  99. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/processor_interface.md +0 -0
  100. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/processors.md +0 -0
  101. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/scope.md +0 -0
  102. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/setup.md +0 -0
  103. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/span_data.md +0 -0
  104. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/spans.md +0 -0
  105. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/traces.md +0 -0
  106. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/tracing/util.md +0 -0
  107. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/usage.md +0 -0
  108. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/events.md +0 -0
  109. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/exceptions.md +0 -0
  110. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/input.md +0 -0
  111. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/model.md +0 -0
  112. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/models/openai_provider.md +0 -0
  113. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/models/openai_stt.md +0 -0
  114. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/models/openai_tts.md +0 -0
  115. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/pipeline.md +0 -0
  116. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/pipeline_config.md +0 -0
  117. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/result.md +0 -0
  118. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/utils.md +0 -0
  119. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/ref/voice/workflow.md +0 -0
  120. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/results.md +0 -0
  121. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/running_agents.md +0 -0
  122. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/scripts/translate_docs.py +0 -0
  123. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/streaming.md +0 -0
  124. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/stylesheets/extra.css +0 -0
  125. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/tools.md +0 -0
  126. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/visualization.md +0 -0
  127. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/voice/pipeline.md +0 -0
  128. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/voice/quickstart.md +0 -0
  129. {openai_agents-0.0.13 → openai_agents-0.0.15}/docs/voice/tracing.md +0 -0
  130. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/__init__.py +0 -0
  131. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/README.md +0 -0
  132. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/agents_as_tools.py +0 -0
  133. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/deterministic.py +0 -0
  134. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/forcing_tool_use.py +0 -0
  135. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/input_guardrails.py +0 -0
  136. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/llm_as_a_judge.py +0 -0
  137. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/output_guardrails.py +0 -0
  138. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/parallelization.py +0 -0
  139. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/routing.py +0 -0
  140. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/agent_patterns/streaming_guardrails.py +0 -0
  141. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/agent_lifecycle_example.py +0 -0
  142. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/dynamic_system_prompt.py +0 -0
  143. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/hello_world.py +0 -0
  144. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/hello_world_jupyter.py +0 -0
  145. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/lifecycle_example.py +0 -0
  146. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/local_image.py +0 -0
  147. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/media/image_bison.jpg +0 -0
  148. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/non_strict_output_type.py +0 -0
  149. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/previous_response_id.py +0 -0
  150. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/remote_image.py +0 -0
  151. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/stream_items.py +0 -0
  152. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/stream_text.py +0 -0
  153. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/basic/tools.py +0 -0
  154. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/customer_service/main.py +0 -0
  155. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/README.md +0 -0
  156. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/__init__.py +0 -0
  157. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/__init__.py +0 -0
  158. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/financials_agent.py +0 -0
  159. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/planner_agent.py +0 -0
  160. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/risk_agent.py +0 -0
  161. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/search_agent.py +0 -0
  162. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
  163. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/agents/writer_agent.py +0 -0
  164. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/main.py +0 -0
  165. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/manager.py +0 -0
  166. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/financial_research_agent/printer.py +0 -0
  167. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/handoffs/message_filter.py +0 -0
  168. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/handoffs/message_filter_streaming.py +0 -0
  169. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/filesystem_example/README.md +0 -0
  170. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/filesystem_example/main.py +0 -0
  171. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/filesystem_example/sample_files/favorite_books.txt +0 -0
  172. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +0 -0
  173. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +0 -0
  174. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/git_example/README.md +0 -0
  175. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/git_example/main.py +0 -0
  176. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/sse_example/README.md +0 -0
  177. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/sse_example/main.py +0 -0
  178. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/mcp/sse_example/server.py +0 -0
  179. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/model_providers/README.md +0 -0
  180. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/model_providers/custom_example_agent.py +0 -0
  181. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/model_providers/custom_example_global.py +0 -0
  182. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/model_providers/custom_example_provider.py +0 -0
  183. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/model_providers/litellm_auto.py +0 -0
  184. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/model_providers/litellm_provider.py +0 -0
  185. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/README.md +0 -0
  186. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/__init__.py +0 -0
  187. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/agents/__init__.py +0 -0
  188. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/agents/planner_agent.py +0 -0
  189. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/agents/writer_agent.py +0 -0
  190. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/main.py +0 -0
  191. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/manager.py +0 -0
  192. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/printer.py +0 -0
  193. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/sample_outputs/product_recs.md +0 -0
  194. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
  195. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/sample_outputs/vacation.md +0 -0
  196. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/research_bot/sample_outputs/vacation.txt +0 -0
  197. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/tools/computer_use.py +0 -0
  198. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/tools/file_search.py +0 -0
  199. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/tools/web_search.py +0 -0
  200. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/__init__.py +0 -0
  201. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/static/README.md +0 -0
  202. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/static/__init__.py +0 -0
  203. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/static/main.py +0 -0
  204. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/static/util.py +0 -0
  205. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/streamed/README.md +0 -0
  206. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/streamed/__init__.py +0 -0
  207. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/streamed/main.py +0 -0
  208. {openai_agents-0.0.13 → openai_agents-0.0.15}/examples/voice/streamed/my_workflow.py +0 -0
  209. {openai_agents-0.0.13 → openai_agents-0.0.15}/mkdocs.yml +0 -0
  210. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/__init__.py +0 -0
  211. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/_config.py +0 -0
  212. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/_debug.py +0 -0
  213. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/_run_impl.py +0 -0
  214. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/agent.py +0 -0
  215. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/agent_output.py +0 -0
  216. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/computer.py +0 -0
  217. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/exceptions.py +0 -0
  218. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/__init__.py +0 -0
  219. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/handoff_filters.py +0 -0
  220. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/handoff_prompt.py +0 -0
  221. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/models/__init__.py +0 -0
  222. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/models/litellm_provider.py +0 -0
  223. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/extensions/visualization.py +0 -0
  224. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/function_schema.py +0 -0
  225. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/guardrail.py +0 -0
  226. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/handoffs.py +0 -0
  227. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/items.py +0 -0
  228. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/lifecycle.py +0 -0
  229. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/logger.py +0 -0
  230. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/mcp/util.py +0 -0
  231. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/model_settings.py +0 -0
  232. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/__init__.py +0 -0
  233. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/_openai_shared.py +0 -0
  234. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/chatcmpl_helpers.py +0 -0
  235. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/chatcmpl_stream_handler.py +0 -0
  236. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/fake_id.py +0 -0
  237. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/interface.py +0 -0
  238. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/multi_provider.py +0 -0
  239. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/openai_chatcompletions.py +0 -0
  240. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/openai_provider.py +0 -0
  241. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/models/openai_responses.py +0 -0
  242. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/py.typed +0 -0
  243. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/run_context.py +0 -0
  244. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/stream_events.py +0 -0
  245. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/strict_schema.py +0 -0
  246. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tool.py +0 -0
  247. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/__init__.py +0 -0
  248. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/create.py +0 -0
  249. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/logger.py +0 -0
  250. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/processor_interface.py +0 -0
  251. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/processors.py +0 -0
  252. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/scope.py +0 -0
  253. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/setup.py +0 -0
  254. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/span_data.py +0 -0
  255. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/spans.py +0 -0
  256. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/traces.py +0 -0
  257. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/tracing/util.py +0 -0
  258. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/usage.py +0 -0
  259. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/__init__.py +0 -0
  260. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/_coro.py +0 -0
  261. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/_error_tracing.py +0 -0
  262. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/_json.py +0 -0
  263. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/_pretty_print.py +0 -0
  264. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/_transforms.py +0 -0
  265. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/util/_types.py +0 -0
  266. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/version.py +0 -0
  267. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/events.py +0 -0
  268. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/exceptions.py +0 -0
  269. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/imports.py +0 -0
  270. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/input.py +0 -0
  271. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/models/__init__.py +0 -0
  272. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/models/openai_model_provider.py +0 -0
  273. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/models/openai_stt.py +0 -0
  274. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/models/openai_tts.py +0 -0
  275. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/pipeline.py +0 -0
  276. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/pipeline_config.py +0 -0
  277. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/result.py +0 -0
  278. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/utils.py +0 -0
  279. {openai_agents-0.0.13 → openai_agents-0.0.15}/src/agents/voice/workflow.py +0 -0
  280. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/README.md +0 -0
  281. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/__init__.py +0 -0
  282. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/conftest.py +0 -0
  283. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/fastapi/__init__.py +0 -0
  284. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/fastapi/streaming_app.py +0 -0
  285. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/fastapi/test_streaming_context.py +0 -0
  286. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/__init__.py +0 -0
  287. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/conftest.py +0 -0
  288. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/helpers.py +0 -0
  289. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/test_caching.py +0 -0
  290. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/test_connect_disconnect.py +0 -0
  291. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/test_mcp_tracing.py +0 -0
  292. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/test_mcp_util.py +0 -0
  293. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/test_runner_calls_mcp.py +0 -0
  294. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/mcp/test_server_errors.py +0 -0
  295. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/model_settings/test_serialization.py +0 -0
  296. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/models/__init__.py +0 -0
  297. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/models/conftest.py +0 -0
  298. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/models/test_litellm_chatcompletions_stream.py +0 -0
  299. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/models/test_map.py +0 -0
  300. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_agent_config.py +0 -0
  301. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_agent_hooks.py +0 -0
  302. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_agent_runner.py +0 -0
  303. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_agent_runner_streamed.py +0 -0
  304. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_agent_tracing.py +0 -0
  305. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_cancel_streaming.py +0 -0
  306. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_computer_action.py +0 -0
  307. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_config.py +0 -0
  308. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_doc_parsing.py +0 -0
  309. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_extension_filters.py +0 -0
  310. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_extra_headers.py +0 -0
  311. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_function_schema.py +0 -0
  312. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_function_tool.py +0 -0
  313. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_function_tool_decorator.py +0 -0
  314. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_global_hooks.py +0 -0
  315. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_guardrails.py +0 -0
  316. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_handoff_tool.py +0 -0
  317. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_items_helpers.py +0 -0
  318. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_max_turns.py +0 -0
  319. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_openai_chatcompletions.py +0 -0
  320. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_openai_chatcompletions_converter.py +0 -0
  321. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_openai_chatcompletions_stream.py +0 -0
  322. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_openai_responses_converter.py +0 -0
  323. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_output_tool.py +0 -0
  324. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_pretty_print.py +0 -0
  325. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_responses.py +0 -0
  326. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_responses_tracing.py +0 -0
  327. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_run_config.py +0 -0
  328. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_run_step_execution.py +0 -0
  329. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_run_step_processing.py +0 -0
  330. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_strict_schema.py +0 -0
  331. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_tool_choice_reset.py +0 -0
  332. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_tool_converter.py +0 -0
  333. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_tool_use_behavior.py +0 -0
  334. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_trace_processor.py +0 -0
  335. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_tracing.py +0 -0
  336. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_tracing_errors.py +0 -0
  337. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_tracing_errors_streamed.py +0 -0
  338. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/test_visualization.py +0 -0
  339. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/testing_processor.py +0 -0
  340. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/tracing/test_processor_api_key.py +0 -0
  341. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/__init__.py +0 -0
  342. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/conftest.py +0 -0
  343. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/fake_models.py +0 -0
  344. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/helpers.py +0 -0
  345. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/test_input.py +0 -0
  346. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/test_openai_stt.py +0 -0
  347. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/test_openai_tts.py +0 -0
  348. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/test_pipeline.py +0 -0
  349. {openai_agents-0.0.13 → openai_agents-0.0.15}/tests/voice/test_workflow.py +0 -0
--- openai_agents-0.0.13/PKG-INFO
+++ openai_agents-0.0.15/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.13
+Version: 0.0.15
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,14 +19,14 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp<2,>=1.6.0; python_version >= '3.10'
+Requires-Dist: mcp<2,>=1.8.0; python_version >= '3.10'
 Requires-Dist: openai>=1.76.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
 Requires-Dist: typing-extensions<5,>=4.12.2
 Provides-Extra: litellm
-Requires-Dist: litellm<2,>=1.65.0; extra == 'litellm'
+Requires-Dist: litellm<2,>=1.67.4.post1; extra == 'litellm'
 Provides-Extra: viz
 Requires-Dist: graphviz>=0.17; extra == 'viz'
 Provides-Extra: voice

--- openai_agents-0.0.13/docs/tracing.md
+++ openai_agents-0.0.15/docs/tracing.md
@@ -101,6 +101,7 @@ To customize this default setup, to send traces to alternative or additional bac

 - [Weights & Biases](https://weave-docs.wandb.ai/guides/integrations/openai_agents)
 - [Arize-Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)
+- [Future AGI](https://docs.futureagi.com/future-agi/products/observability/auto-instrumentation/openai_agents)
 - [MLflow (self-hosted/OSS](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)
 - [MLflow (Databricks hosted](https://docs.databricks.com/aws/en/mlflow/mlflow-tracing#-automatic-tracing)
 - [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
--- /dev/null
+++ openai_agents-0.0.15/examples/mcp/streamablehttp_example/README.md
@@ -0,0 +1,13 @@
+# MCP Streamable HTTP Example
+
+This example uses a local Streamable HTTP server in [server.py](server.py).
+
+Run the example via:
+
+```
+uv run python examples/mcp/streamablehttp_example/main.py
+```
+
+## Details
+
+The example uses the `MCPServerStreamableHttp` class from `agents.mcp`. The server runs in a sub-process at `https://localhost:8000/mcp`.
--- /dev/null
+++ openai_agents-0.0.15/examples/mcp/streamablehttp_example/main.py
@@ -0,0 +1,83 @@
+import asyncio
+import os
+import shutil
+import subprocess
+import time
+from typing import Any
+
+from agents import Agent, Runner, gen_trace_id, trace
+from agents.mcp import MCPServer, MCPServerStreamableHttp
+from agents.model_settings import ModelSettings
+
+
+async def run(mcp_server: MCPServer):
+    agent = Agent(
+        name="Assistant",
+        instructions="Use the tools to answer the questions.",
+        mcp_servers=[mcp_server],
+        model_settings=ModelSettings(tool_choice="required"),
+    )
+
+    # Use the `add` tool to add two numbers
+    message = "Add these numbers: 7 and 22."
+    print(f"Running: {message}")
+    result = await Runner.run(starting_agent=agent, input=message)
+    print(result.final_output)
+
+    # Run the `get_weather` tool
+    message = "What's the weather in Tokyo?"
+    print(f"\n\nRunning: {message}")
+    result = await Runner.run(starting_agent=agent, input=message)
+    print(result.final_output)
+
+    # Run the `get_secret_word` tool
+    message = "What's the secret word?"
+    print(f"\n\nRunning: {message}")
+    result = await Runner.run(starting_agent=agent, input=message)
+    print(result.final_output)
+
+
+async def main():
+    async with MCPServerStreamableHttp(
+        name="Streamable HTTP Python Server",
+        params={
+            "url": "http://localhost:8000/mcp",
+        },
+    ) as server:
+        trace_id = gen_trace_id()
+        with trace(workflow_name="Streamable HTTP Example", trace_id=trace_id):
+            print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n")
+            await run(server)
+
+
+if __name__ == "__main__":
+    # Let's make sure the user has uv installed
+    if not shutil.which("uv"):
+        raise RuntimeError(
+            "uv is not installed. Please install it: https://docs.astral.sh/uv/getting-started/installation/"
+        )
+
+    # We'll run the Streamable HTTP server in a subprocess. Usually this would be a remote server, but for this
+    # demo, we'll run it locally at http://localhost:8000/mcp
+    process: subprocess.Popen[Any] | None = None
+    try:
+        this_dir = os.path.dirname(os.path.abspath(__file__))
+        server_file = os.path.join(this_dir, "server.py")
+
+        print("Starting Streamable HTTP server at http://localhost:8000/mcp ...")
+
+        # Run `uv run server.py` to start the Streamable HTTP server
+        process = subprocess.Popen(["uv", "run", server_file])
+        # Give it 3 seconds to start
+        time.sleep(3)
+
+        print("Streamable HTTP server started. Running example...\n\n")
+    except Exception as e:
+        print(f"Error starting Streamable HTTP server: {e}")
+        exit(1)
+
+    try:
+        asyncio.run(main())
+    finally:
+        if process:
+            process.terminate()
--- /dev/null
+++ openai_agents-0.0.15/examples/mcp/streamablehttp_example/server.py
@@ -0,0 +1,33 @@
+import random
+
+import requests
+from mcp.server.fastmcp import FastMCP
+
+# Create server
+mcp = FastMCP("Echo Server")
+
+
+@mcp.tool()
+def add(a: int, b: int) -> int:
+    """Add two numbers"""
+    print(f"[debug-server] add({a}, {b})")
+    return a + b
+
+
+@mcp.tool()
+def get_secret_word() -> str:
+    print("[debug-server] get_secret_word()")
+    return random.choice(["apple", "banana", "cherry"])
+
+
+@mcp.tool()
+def get_current_weather(city: str) -> str:
+    print(f"[debug-server] get_current_weather({city})")
+
+    endpoint = "https://wttr.in"
+    response = requests.get(f"{endpoint}/{city}")
+    return response.text
+
+
+if __name__ == "__main__":
+    mcp.run(transport="streamable-http")
--- openai_agents-0.0.13/examples/research_bot/agents/search_agent.py
+++ openai_agents-0.0.15/examples/research_bot/agents/search_agent.py
@@ -3,7 +3,7 @@ from agents.model_settings import ModelSettings

 INSTRUCTIONS = (
     "You are a research assistant. Given a search term, you search the web for that term and "
-    "produce a concise summary of the results. The summary must 2-3 paragraphs and less than 300 "
+    "produce a concise summary of the results. The summary must be 2-3 paragraphs and less than 300 "
     "words. Capture the main points. Write succinctly, no need to have complete sentences or good "
     "grammar. This will be consumed by someone synthesizing a report, so its vital you capture the "
     "essence and ignore any fluff. Do not include any additional commentary other than the summary "
--- openai_agents-0.0.13/pyproject.toml
+++ openai_agents-0.0.15/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai-agents"
-version = "0.0.13"
+version = "0.0.15"
 description = "OpenAI Agents SDK"
 readme = "README.md"
 requires-python = ">=3.9"
@@ -13,7 +13,7 @@ dependencies = [
     "typing-extensions>=4.12.2, <5",
     "requests>=2.0, <3",
     "types-requests>=2.0, <3",
-    "mcp>=1.6.0, <2; python_version >= '3.10'",
+    "mcp>=1.8.0, <2; python_version >= '3.10'",
 ]
 classifiers = [
     "Typing :: Typed",
@@ -36,7 +36,7 @@ Repository = "https://github.com/openai/openai-agents-python"
 [project.optional-dependencies]
 voice = ["numpy>=2.2.0, <3; python_version>='3.10'", "websockets>=15.0, <16"]
 viz = ["graphviz>=0.17"]
-litellm = ["litellm>=1.65.0, <2"]
+litellm = ["litellm>=1.67.4.post1, <2"]

 [dependency-groups]
 dev = [
--- openai_agents-0.0.13/src/agents/extensions/models/litellm_model.py
+++ openai_agents-0.0.15/src/agents/extensions/models/litellm_model.py
@@ -269,6 +269,8 @@ class LitellmModel(Model):
             extra_kwargs["extra_query"] = model_settings.extra_query
         if model_settings.metadata:
             extra_kwargs["metadata"] = model_settings.metadata
+        if model_settings.extra_body and isinstance(model_settings.extra_body, dict):
+            extra_kwargs.update(model_settings.extra_body)

         ret = await litellm.acompletion(
             model=self.model,
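
In practice this means any `extra_body` dict set on `ModelSettings` is merged into the keyword arguments that `LitellmModel` passes to `litellm.acompletion`. A minimal caller-side sketch (the `cached_content` key and the model name are illustrative, not SDK defaults):

```python
from agents.extensions.models.litellm_model import LitellmModel
from agents.model_settings import ModelSettings

# Every entry in extra_body is merged into the kwargs forwarded to litellm.acompletion,
# which is how provider-specific parameters can ride along.
settings = ModelSettings(
    temperature=0.1,
    extra_body={"cached_content": "some_cache_id"},  # illustrative provider-specific key
)
model = LitellmModel(model="some-litellm-model")  # illustrative model name
# model.get_response(..., model_settings=settings, ...) now includes cached_content.
```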
--- openai_agents-0.0.13/src/agents/mcp/__init__.py
+++ openai_agents-0.0.15/src/agents/mcp/__init__.py
@@ -5,6 +5,8 @@ try:
         MCPServerSseParams,
         MCPServerStdio,
         MCPServerStdioParams,
+        MCPServerStreamableHttp,
+        MCPServerStreamableHttpParams,
     )
 except ImportError:
     pass
@@ -17,5 +19,7 @@ __all__ = [
     "MCPServerSseParams",
     "MCPServerStdio",
     "MCPServerStdioParams",
+    "MCPServerStreamableHttp",
+    "MCPServerStreamableHttpParams",
     "MCPUtil",
 ]
--- openai_agents-0.0.13/src/agents/mcp/server.py
+++ openai_agents-0.0.15/src/agents/mcp/server.py
@@ -10,7 +10,9 @@ from typing import Any, Literal
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
 from mcp.client.sse import sse_client
-from mcp.types import CallToolResult, JSONRPCMessage
+from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
+from mcp.shared.message import SessionMessage
+from mcp.types import CallToolResult
 from typing_extensions import NotRequired, TypedDict

 from ..exceptions import UserError
@@ -83,8 +85,9 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         self,
     ) -> AbstractAsyncContextManager[
         tuple[
-            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
-            MemoryObjectSendStream[JSONRPCMessage],
+            MemoryObjectReceiveStream[SessionMessage | Exception],
+            MemoryObjectSendStream[SessionMessage],
+            GetSessionIdCallback | None
         ]
     ]:
         """Create the streams for the server."""
@@ -105,7 +108,11 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         """Connect to the server."""
         try:
             transport = await self.exit_stack.enter_async_context(self.create_streams())
-            read, write = transport
+            # streamablehttp_client returns (read, write, get_session_id)
+            # sse_client returns (read, write)
+
+            read, write, *_ = transport
+
             session = await self.exit_stack.enter_async_context(
                 ClientSession(
                     read,
@@ -232,8 +239,9 @@ class MCPServerStdio(_MCPServerWithClientSession):
         self,
     ) -> AbstractAsyncContextManager[
         tuple[
-            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
-            MemoryObjectSendStream[JSONRPCMessage],
+            MemoryObjectReceiveStream[SessionMessage | Exception],
+            MemoryObjectSendStream[SessionMessage],
+            GetSessionIdCallback | None
         ]
     ]:
         """Create the streams for the server."""
@@ -302,8 +310,9 @@ class MCPServerSse(_MCPServerWithClientSession):
         self,
     ) -> AbstractAsyncContextManager[
         tuple[
-            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
-            MemoryObjectSendStream[JSONRPCMessage],
+            MemoryObjectReceiveStream[SessionMessage | Exception],
+            MemoryObjectSendStream[SessionMessage],
+            GetSessionIdCallback | None
         ]
     ]:
         """Create the streams for the server."""
@@ -318,3 +327,84 @@ class MCPServerSse(_MCPServerWithClientSession):
     def name(self) -> str:
         """A readable name for the server."""
         return self._name
+
+
+class MCPServerStreamableHttpParams(TypedDict):
+    """Mirrors the params in`mcp.client.streamable_http.streamablehttp_client`."""
+
+    url: str
+    """The URL of the server."""
+
+    headers: NotRequired[dict[str, str]]
+    """The headers to send to the server."""
+
+    timeout: NotRequired[timedelta]
+    """The timeout for the HTTP request. Defaults to 5 seconds."""
+
+    sse_read_timeout: NotRequired[timedelta]
+    """The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""
+
+    terminate_on_close: NotRequired[bool]
+    """Terminate on close"""
+
+
+class MCPServerStreamableHttp(_MCPServerWithClientSession):
+    """MCP server implementation that uses the Streamable HTTP transport. See the [spec]
+    (https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http)
+    for details.
+    """
+
+    def __init__(
+        self,
+        params: MCPServerStreamableHttpParams,
+        cache_tools_list: bool = False,
+        name: str | None = None,
+        client_session_timeout_seconds: float | None = 5,
+    ):
+        """Create a new MCP server based on the Streamable HTTP transport.
+
+        Args:
+            params: The params that configure the server. This includes the URL of the server,
+                the headers to send to the server, the timeout for the HTTP request, and the
+                timeout for the Streamable HTTP connection and whether we need to
+                terminate on close.
+
+            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+                cached and only fetched from the server once. If `False`, the tools list will be
+                fetched from the server on each call to `list_tools()`. The cache can be
+                invalidated by calling `invalidate_tools_cache()`. You should set this to `True`
+                if you know the server will not change its tools list, because it can drastically
+                improve latency (by avoiding a round-trip to the server every time).
+
+            name: A readable name for the server. If not provided, we'll create one from the
+                URL.
+
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
+        """
+        super().__init__(cache_tools_list, client_session_timeout_seconds)
+
+        self.params = params
+        self._name = name or f"streamable_http: {self.params['url']}"
+
+    def create_streams(
+        self,
+    ) -> AbstractAsyncContextManager[
+        tuple[
+            MemoryObjectReceiveStream[SessionMessage | Exception],
+            MemoryObjectSendStream[SessionMessage],
+            GetSessionIdCallback | None
+        ]
+    ]:
+        """Create the streams for the server."""
+        return streamablehttp_client(
+            url=self.params["url"],
+            headers=self.params.get("headers", None),
+            timeout=self.params.get("timeout", timedelta(seconds=30)),
+            sse_read_timeout=self.params.get("sse_read_timeout", timedelta(seconds=60 * 5)),
+            terminate_on_close=self.params.get("terminate_on_close", True)
+        )
+
+    @property
+    def name(self) -> str:
+        """A readable name for the server."""
+        return self._name
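
For orientation, construction of the new server class looks roughly like this; the URL, header, and timeout values are assumptions for illustration, and the bundled `streamablehttp_example` above shows a complete runnable flow:

```python
from datetime import timedelta

from agents.mcp import MCPServerStreamableHttp

# Only "url" is required by MCPServerStreamableHttpParams; the other values are illustrative.
server = MCPServerStreamableHttp(
    params={
        "url": "http://localhost:8000/mcp",
        "headers": {"Authorization": "Bearer <token>"},
        "timeout": timedelta(seconds=10),
    },
    cache_tools_list=True,  # cache list_tools() to avoid a round-trip on every agent turn
    name="local-streamable-http-server",
)

# The server is used as an async context manager and attached to an agent, e.g.:
#     async with server:
#         agent = Agent(name="Assistant", mcp_servers=[server], ...)
```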
--- openai_agents-0.0.13/src/agents/models/chatcmpl_converter.py
+++ openai_agents-0.0.15/src/agents/models/chatcmpl_converter.py
@@ -234,7 +234,7 @@ class Converter:
                                 type="image_url",
                                 image_url={
                                     "url": casted_image_param["image_url"],
-                                    "detail": casted_image_param["detail"],
+                                    "detail": casted_image_param.get("detail", "auto"),
                                 },
                             )
                         )
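
The converter change makes the `detail` field optional for image inputs routed through the chat completions API: a missing key now falls back to `"auto"` instead of raising a `KeyError`. A hypothetical input item that benefits (the shape follows the Responses-style input the SDK accepts; the URL is illustrative):

```python
# A user message whose image part omits "detail"; the converter now fills in "auto".
user_message = {
    "role": "user",
    "content": [
        {"type": "input_text", "text": "What is in this image?"},
        {"type": "input_image", "image_url": "https://example.com/cat.png"},  # no "detail" key
    ],
}
```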
--- openai_agents-0.0.13/src/agents/result.py
+++ openai_agents-0.0.15/src/agents/result.py
@@ -15,6 +15,7 @@ from .exceptions import InputGuardrailTripwireTriggered, MaxTurnsExceeded
 from .guardrail import InputGuardrailResult, OutputGuardrailResult
 from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
 from .logger import logger
+from .run_context import RunContextWrapper
 from .stream_events import StreamEvent
 from .tracing import Trace
 from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
@@ -50,6 +51,9 @@ class RunResultBase(abc.ABC):
     output_guardrail_results: list[OutputGuardrailResult]
     """Guardrail results for the final output of the agent."""

+    context_wrapper: RunContextWrapper[Any]
+    """The context wrapper for the agent run."""
+
     @property
     @abc.abstractmethod
     def last_agent(self) -> Agent[Any]:
@@ -75,9 +79,7 @@ class RunResultBase(abc.ABC):

     def to_input_list(self) -> list[TResponseInputItem]:
         """Creates a new input list, merging the original input with all the new items generated."""
-        original_items: list[TResponseInputItem] = ItemHelpers.input_to_new_input_list(
-            self.input
-        )
+        original_items: list[TResponseInputItem] = ItemHelpers.input_to_new_input_list(self.input)
         new_items = [item.to_input_item() for item in self.new_items]

         return original_items + new_items
@@ -206,17 +208,13 @@ class RunResultStreaming(RunResultBase):

     def _check_errors(self):
         if self.current_turn > self.max_turns:
-            self._stored_exception = MaxTurnsExceeded(
-                f"Max turns ({self.max_turns}) exceeded"
-            )
+            self._stored_exception = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded")

         # Fetch all the completed guardrail results from the queue and raise if needed
         while not self._input_guardrail_queue.empty():
             guardrail_result = self._input_guardrail_queue.get_nowait()
             if guardrail_result.output.tripwire_triggered:
-                self._stored_exception = InputGuardrailTripwireTriggered(
-                    guardrail_result
-                )
+                self._stored_exception = InputGuardrailTripwireTriggered(guardrail_result)

         # Check the tasks for any exceptions
         if self._run_impl_task and self._run_impl_task.done():
--- openai_agents-0.0.13/src/agents/run.py
+++ openai_agents-0.0.15/src/agents/run.py
@@ -270,6 +270,7 @@ class Runner:
                         _last_agent=current_agent,
                         input_guardrail_results=input_guardrail_results,
                         output_guardrail_results=output_guardrail_results,
+                        context_wrapper=context_wrapper,
                     )
                 elif isinstance(turn_result.next_step, NextStepHandoff):
                     current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)
@@ -423,6 +424,7 @@ class Runner:
             output_guardrail_results=[],
             _current_agent_output_schema=output_schema,
             trace=new_trace,
+            context_wrapper=context_wrapper,
         )

         # Kick off the actual agent loop in the background and return the streamed result object.
@@ -696,6 +698,7 @@ class Runner:
                     usage=usage,
                     response_id=event.response.id,
                 )
+                context_wrapper.usage.add(usage)

                 streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
--- openai_agents-0.0.13/src/agents/voice/__init__.py
+++ openai_agents-0.0.15/src/agents/voice/__init__.py
@@ -7,6 +7,7 @@ from .model import (
     STTModelSettings,
     TTSModel,
     TTSModelSettings,
+    TTSVoice,
     VoiceModelProvider,
 )
 from .models.openai_model_provider import OpenAIVoiceModelProvider
@@ -30,6 +31,7 @@ __all__ = [
     "STTModelSettings",
     "TTSModel",
     "TTSModelSettings",
+    "TTSVoice",
     "VoiceModelProvider",
     "StreamedAudioResult",
     "SingleAgentVoiceWorkflow",
--- openai_agents-0.0.13/src/agents/voice/model.py
+++ openai_agents-0.0.15/src/agents/voice/model.py
@@ -14,14 +14,13 @@ DEFAULT_TTS_INSTRUCTIONS = (
 )
 DEFAULT_TTS_BUFFER_SIZE = 120

+TTSVoice = Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]
+"""Exportable type for the TTSModelSettings voice enum"""

 @dataclass
 class TTSModelSettings:
     """Settings for a TTS model."""
-
-    voice: (
-        Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"] | None
-    ) = None
+    voice: TTSVoice | None = None
     """
     The voice to use for the TTS model. If not provided, the default voice for the respective model
     will be used.
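
A short sketch of what the newly exported alias enables on the caller side (the chosen voice is just one of the allowed literal values):

```python
from agents.voice import TTSModelSettings, TTSVoice

# TTSVoice is a Literal alias, so type checkers can validate the voice name.
preferred_voice: TTSVoice = "nova"
settings = TTSModelSettings(voice=preferred_voice)
```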
--- openai_agents-0.0.13/tests/fake_model.py
+++ openai_agents-0.0.15/tests/fake_model.py
@@ -3,7 +3,8 @@ from __future__ import annotations
 from collections.abc import AsyncIterator
 from typing import Any

-from openai.types.responses import Response, ResponseCompletedEvent
+from openai.types.responses import Response, ResponseCompletedEvent, ResponseUsage
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from agents.agent_output import AgentOutputSchemaBase
 from agents.handoffs import Handoff
@@ -33,6 +34,10 @@ class FakeModel(Model):
         )
         self.tracing_enabled = tracing_enabled
         self.last_turn_args: dict[str, Any] = {}
+        self.hardcoded_usage: Usage | None = None
+
+    def set_hardcoded_usage(self, usage: Usage):
+        self.hardcoded_usage = usage

     def set_next_output(self, output: list[TResponseOutputItem] | Exception):
         self.turn_outputs.append(output)
@@ -83,7 +88,7 @@ class FakeModel(Model):

         return ModelResponse(
             output=output,
-            usage=Usage(),
+            usage=self.hardcoded_usage or Usage(),
             response_id=None,
         )

@@ -123,13 +128,14 @@

         yield ResponseCompletedEvent(
             type="response.completed",
-            response=get_response_obj(output),
+            response=get_response_obj(output, usage=self.hardcoded_usage),
         )


 def get_response_obj(
     output: list[TResponseOutputItem],
     response_id: str | None = None,
+    usage: Usage | None = None,
 ) -> Response:
     return Response(
         id=response_id or "123",
@@ -141,4 +147,11 @@ def get_response_obj(
         tools=[],
         top_p=None,
         parallel_tool_calls=False,
+        usage=ResponseUsage(
+            input_tokens=usage.input_tokens if usage else 0,
+            output_tokens=usage.output_tokens if usage else 0,
+            total_tokens=usage.total_tokens if usage else 0,
+            input_tokens_details=InputTokensDetails(cached_tokens=0),
+            output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
+        ),
     )
--- /dev/null
+++ openai_agents-0.0.15/tests/models/test_litellm_extra_body.py
@@ -0,0 +1,45 @@
+import litellm
+import pytest
+from litellm.types.utils import Choices, Message, ModelResponse, Usage
+
+from agents.extensions.models.litellm_model import LitellmModel
+from agents.model_settings import ModelSettings
+from agents.models.interface import ModelTracing
+
+
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_extra_body_is_forwarded(monkeypatch):
+    """
+    Forward `extra_body` entries into litellm.acompletion kwargs.
+
+    This ensures that user-provided parameters (e.g. cached_content)
+    arrive alongside default arguments.
+    """
+    captured: dict[str, object] = {}
+
+    async def fake_acompletion(model, messages=None, **kwargs):
+        captured.update(kwargs)
+        msg = Message(role="assistant", content="ok")
+        choice = Choices(index=0, message=msg)
+        return ModelResponse(choices=[choice], usage=Usage(0, 0, 0))
+
+    monkeypatch.setattr(litellm, "acompletion", fake_acompletion)
+    settings = ModelSettings(
+        temperature=0.1,
+        extra_body={"cached_content": "some_cache", "foo": 123}
+    )
+    model = LitellmModel(model="test-model")
+
+    await model.get_response(
+        system_instructions=None,
+        input=[],
+        model_settings=settings,
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        previous_response_id=None,
+    )
+
+    assert {"cached_content": "some_cache", "foo": 123}.items() <= captured.items()
--- openai_agents-0.0.13/tests/test_result_cast.py
+++ openai_agents-0.0.15/tests/test_result_cast.py
@@ -3,7 +3,7 @@ from typing import Any
 import pytest
 from pydantic import BaseModel

-from agents import Agent, RunResult
+from agents import Agent, RunContextWrapper, RunResult


 def create_run_result(final_output: Any) -> RunResult:
@@ -15,6 +15,7 @@ def create_run_result(final_output: Any) -> RunResult:
         input_guardrail_results=[],
         output_guardrail_results=[],
         _last_agent=Agent(name="test"),
+        context_wrapper=RunContextWrapper(context=None),
     )
