openai-agents 0.0.7__tar.gz → 0.0.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic.

Files changed (296)
  1. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/ISSUE_TEMPLATE/feature_request.md +1 -1
  2. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/ISSUE_TEMPLATE/question.md +1 -1
  3. openai_agents-0.0.8/.vscode/settings.json +7 -0
  4. {openai_agents-0.0.7 → openai_agents-0.0.8}/PKG-INFO +2 -2
  5. openai_agents-0.0.8/docs/assets/images/mcp-tracing.jpg +0 -0
  6. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/mcp.md +11 -2
  7. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/running_agents.md +1 -1
  8. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/tracing.md +4 -1
  9. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/manager.py +1 -1
  10. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/filesystem_example/README.md +2 -2
  11. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/filesystem_example/main.py +1 -1
  12. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/git_example/README.md +3 -2
  13. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/git_example/main.py +2 -6
  14. openai_agents-0.0.8/examples/mcp/sse_example/README.md +13 -0
  15. openai_agents-0.0.8/examples/mcp/sse_example/main.py +83 -0
  16. openai_agents-0.0.8/examples/mcp/sse_example/server.py +33 -0
  17. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/manager.py +1 -1
  18. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/sample_outputs/product_recs.txt +1 -1
  19. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/sample_outputs/vacation.txt +1 -1
  20. {openai_agents-0.0.7 → openai_agents-0.0.8}/pyproject.toml +2 -3
  21. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/__init__.py +2 -0
  22. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/agent.py +16 -3
  23. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/mcp/util.py +23 -7
  24. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/model_settings.py +25 -13
  25. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/openai_chatcompletions.py +12 -2
  26. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/openai_responses.py +3 -0
  27. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/strict_schema.py +1 -1
  28. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/processors.py +0 -1
  29. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/span_data.py +1 -1
  30. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/version.py +1 -1
  31. openai_agents-0.0.8/tests/mcp/test_mcp_util.py +262 -0
  32. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_agent_runner.py +28 -0
  33. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_function_schema.py +12 -1
  34. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_function_tool_decorator.py +35 -0
  35. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_openai_chatcompletions.py +2 -0
  36. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_tracing_errors.py +6 -8
  37. {openai_agents-0.0.7 → openai_agents-0.0.8}/uv.lock +7 -5
  38. openai_agents-0.0.7/tests/mcp/test_mcp_util.py +0 -109
  39. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  40. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
  41. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
  42. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/workflows/docs.yml +0 -0
  43. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/workflows/issues.yml +0 -0
  44. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/workflows/publish.yml +0 -0
  45. {openai_agents-0.0.7 → openai_agents-0.0.8}/.github/workflows/tests.yml +0 -0
  46. {openai_agents-0.0.7 → openai_agents-0.0.8}/.gitignore +0 -0
  47. {openai_agents-0.0.7 → openai_agents-0.0.8}/.prettierrc +0 -0
  48. {openai_agents-0.0.7 → openai_agents-0.0.8}/LICENSE +0 -0
  49. {openai_agents-0.0.7 → openai_agents-0.0.8}/Makefile +0 -0
  50. {openai_agents-0.0.7 → openai_agents-0.0.8}/README.md +0 -0
  51. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/agents.md +0 -0
  52. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/assets/images/favicon-platform.svg +0 -0
  53. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/assets/images/graph.png +0 -0
  54. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/assets/images/orchestration.png +0 -0
  55. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/assets/logo.svg +0 -0
  56. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/config.md +0 -0
  57. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/context.md +0 -0
  58. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/examples.md +0 -0
  59. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/guardrails.md +0 -0
  60. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/handoffs.md +0 -0
  61. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/index.md +0 -0
  62. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/models.md +0 -0
  63. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/multi_agent.md +0 -0
  64. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/quickstart.md +0 -0
  65. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/agent.md +0 -0
  66. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/agent_output.md +0 -0
  67. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/exceptions.md +0 -0
  68. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/extensions/handoff_filters.md +0 -0
  69. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/extensions/handoff_prompt.md +0 -0
  70. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/function_schema.md +0 -0
  71. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/guardrail.md +0 -0
  72. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/handoffs.md +0 -0
  73. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/index.md +0 -0
  74. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/items.md +0 -0
  75. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/lifecycle.md +0 -0
  76. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/mcp/server.md +0 -0
  77. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/mcp/util.md +0 -0
  78. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/model_settings.md +0 -0
  79. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/models/interface.md +0 -0
  80. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/models/openai_chatcompletions.md +0 -0
  81. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/models/openai_responses.md +0 -0
  82. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/result.md +0 -0
  83. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/run.md +0 -0
  84. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/run_context.md +0 -0
  85. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/stream_events.md +0 -0
  86. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tool.md +0 -0
  87. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/create.md +0 -0
  88. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/index.md +0 -0
  89. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/processor_interface.md +0 -0
  90. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/processors.md +0 -0
  91. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/scope.md +0 -0
  92. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/setup.md +0 -0
  93. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/span_data.md +0 -0
  94. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/spans.md +0 -0
  95. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/traces.md +0 -0
  96. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/tracing/util.md +0 -0
  97. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/usage.md +0 -0
  98. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/events.md +0 -0
  99. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/exceptions.md +0 -0
  100. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/input.md +0 -0
  101. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/model.md +0 -0
  102. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/models/openai_provider.md +0 -0
  103. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/models/openai_stt.md +0 -0
  104. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/models/openai_tts.md +0 -0
  105. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/pipeline.md +0 -0
  106. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/pipeline_config.md +0 -0
  107. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/result.md +0 -0
  108. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/utils.md +0 -0
  109. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/ref/voice/workflow.md +0 -0
  110. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/results.md +0 -0
  111. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/streaming.md +0 -0
  112. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/stylesheets/extra.css +0 -0
  113. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/tools.md +0 -0
  114. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/visualization.md +0 -0
  115. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/voice/pipeline.md +0 -0
  116. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/voice/quickstart.md +0 -0
  117. {openai_agents-0.0.7 → openai_agents-0.0.8}/docs/voice/tracing.md +0 -0
  118. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/__init__.py +0 -0
  119. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/README.md +0 -0
  120. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/agents_as_tools.py +0 -0
  121. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/deterministic.py +0 -0
  122. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/forcing_tool_use.py +0 -0
  123. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/input_guardrails.py +0 -0
  124. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/llm_as_a_judge.py +0 -0
  125. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/output_guardrails.py +0 -0
  126. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/parallelization.py +0 -0
  127. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/agent_patterns/routing.py +0 -0
  128. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/agent_lifecycle_example.py +0 -0
  129. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/dynamic_system_prompt.py +0 -0
  130. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/hello_world.py +0 -0
  131. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/hello_world_jupyter.py +0 -0
  132. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/lifecycle_example.py +0 -0
  133. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/stream_items.py +0 -0
  134. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/stream_text.py +0 -0
  135. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/basic/tools.py +0 -0
  136. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/customer_service/main.py +0 -0
  137. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/README.md +0 -0
  138. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/__init__.py +0 -0
  139. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/__init__.py +0 -0
  140. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/financials_agent.py +0 -0
  141. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/planner_agent.py +0 -0
  142. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/risk_agent.py +0 -0
  143. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/search_agent.py +0 -0
  144. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
  145. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/agents/writer_agent.py +0 -0
  146. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/main.py +0 -0
  147. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/financial_research_agent/printer.py +0 -0
  148. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/handoffs/message_filter.py +0 -0
  149. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/handoffs/message_filter_streaming.py +0 -0
  150. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/filesystem_example/sample_files/favorite_books.txt +0 -0
  151. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +0 -0
  152. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +0 -0
  153. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/model_providers/README.md +0 -0
  154. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/model_providers/custom_example_agent.py +0 -0
  155. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/model_providers/custom_example_global.py +0 -0
  156. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/model_providers/custom_example_provider.py +0 -0
  157. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/README.md +0 -0
  158. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/__init__.py +0 -0
  159. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/agents/__init__.py +0 -0
  160. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/agents/planner_agent.py +0 -0
  161. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/agents/search_agent.py +0 -0
  162. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/agents/writer_agent.py +0 -0
  163. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/main.py +0 -0
  164. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/printer.py +0 -0
  165. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/sample_outputs/product_recs.md +0 -0
  166. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/research_bot/sample_outputs/vacation.md +0 -0
  167. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/tools/computer_use.py +0 -0
  168. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/tools/file_search.py +0 -0
  169. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/tools/web_search.py +0 -0
  170. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/__init__.py +0 -0
  171. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/static/README.md +0 -0
  172. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/static/__init__.py +0 -0
  173. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/static/main.py +0 -0
  174. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/static/util.py +0 -0
  175. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/streamed/README.md +0 -0
  176. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/streamed/__init__.py +0 -0
  177. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/streamed/main.py +0 -0
  178. {openai_agents-0.0.7 → openai_agents-0.0.8}/examples/voice/streamed/my_workflow.py +0 -0
  179. {openai_agents-0.0.7 → openai_agents-0.0.8}/mkdocs.yml +0 -0
  180. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/_config.py +0 -0
  181. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/_debug.py +0 -0
  182. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/_run_impl.py +0 -0
  183. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/agent_output.py +0 -0
  184. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/computer.py +0 -0
  185. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/exceptions.py +0 -0
  186. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/extensions/__init__.py +0 -0
  187. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/extensions/handoff_filters.py +0 -0
  188. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/extensions/handoff_prompt.py +0 -0
  189. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/extensions/visualization.py +0 -0
  190. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/function_schema.py +0 -0
  191. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/guardrail.py +0 -0
  192. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/handoffs.py +0 -0
  193. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/items.py +0 -0
  194. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/lifecycle.py +0 -0
  195. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/logger.py +0 -0
  196. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/mcp/__init__.py +0 -0
  197. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/mcp/server.py +0 -0
  198. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/__init__.py +0 -0
  199. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/_openai_shared.py +0 -0
  200. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/fake_id.py +0 -0
  201. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/interface.py +0 -0
  202. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/models/openai_provider.py +0 -0
  203. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/py.typed +0 -0
  204. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/result.py +0 -0
  205. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/run.py +0 -0
  206. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/run_context.py +0 -0
  207. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/stream_events.py +0 -0
  208. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tool.py +0 -0
  209. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/__init__.py +0 -0
  210. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/create.py +0 -0
  211. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/logger.py +0 -0
  212. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/processor_interface.py +0 -0
  213. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/scope.py +0 -0
  214. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/setup.py +0 -0
  215. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/spans.py +0 -0
  216. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/traces.py +0 -0
  217. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/tracing/util.py +0 -0
  218. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/usage.py +0 -0
  219. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/__init__.py +0 -0
  220. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/_coro.py +0 -0
  221. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/_error_tracing.py +0 -0
  222. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/_json.py +0 -0
  223. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/_pretty_print.py +0 -0
  224. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/_transforms.py +0 -0
  225. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/util/_types.py +0 -0
  226. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/__init__.py +0 -0
  227. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/events.py +0 -0
  228. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/exceptions.py +0 -0
  229. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/imports.py +0 -0
  230. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/input.py +0 -0
  231. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/model.py +0 -0
  232. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/models/__init__.py +0 -0
  233. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/models/openai_model_provider.py +0 -0
  234. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/models/openai_stt.py +0 -0
  235. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/models/openai_tts.py +0 -0
  236. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/pipeline.py +0 -0
  237. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/pipeline_config.py +0 -0
  238. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/result.py +0 -0
  239. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/utils.py +0 -0
  240. {openai_agents-0.0.7 → openai_agents-0.0.8}/src/agents/voice/workflow.py +0 -0
  241. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/README.md +0 -0
  242. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/__init__.py +0 -0
  243. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/conftest.py +0 -0
  244. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/fake_model.py +0 -0
  245. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/__init__.py +0 -0
  246. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/conftest.py +0 -0
  247. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/helpers.py +0 -0
  248. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/test_caching.py +0 -0
  249. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/test_connect_disconnect.py +0 -0
  250. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/test_mcp_tracing.py +0 -0
  251. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/test_runner_calls_mcp.py +0 -0
  252. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/mcp/test_server_errors.py +0 -0
  253. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_agent_config.py +0 -0
  254. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_agent_hooks.py +0 -0
  255. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_agent_runner_streamed.py +0 -0
  256. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_agent_tracing.py +0 -0
  257. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_computer_action.py +0 -0
  258. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_config.py +0 -0
  259. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_doc_parsing.py +0 -0
  260. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_extension_filters.py +0 -0
  261. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_function_tool.py +0 -0
  262. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_global_hooks.py +0 -0
  263. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_guardrails.py +0 -0
  264. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_handoff_tool.py +0 -0
  265. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_items_helpers.py +0 -0
  266. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_max_turns.py +0 -0
  267. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_openai_chatcompletions_converter.py +0 -0
  268. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_openai_chatcompletions_stream.py +0 -0
  269. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_openai_responses_converter.py +0 -0
  270. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_output_tool.py +0 -0
  271. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_pretty_print.py +0 -0
  272. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_responses.py +0 -0
  273. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_responses_tracing.py +0 -0
  274. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_result_cast.py +0 -0
  275. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_run_config.py +0 -0
  276. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_run_step_execution.py +0 -0
  277. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_run_step_processing.py +0 -0
  278. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_strict_schema.py +0 -0
  279. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_tool_choice_reset.py +0 -0
  280. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_tool_converter.py +0 -0
  281. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_tool_use_behavior.py +0 -0
  282. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_trace_processor.py +0 -0
  283. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_tracing.py +0 -0
  284. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_tracing_errors_streamed.py +0 -0
  285. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/test_visualization.py +0 -0
  286. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/testing_processor.py +0 -0
  287. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/tracing/test_processor_api_key.py +0 -0
  288. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/__init__.py +0 -0
  289. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/conftest.py +0 -0
  290. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/fake_models.py +0 -0
  291. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/helpers.py +0 -0
  292. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/test_input.py +0 -0
  293. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/test_openai_stt.py +0 -0
  294. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/test_openai_tts.py +0 -0
  295. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/test_pipeline.py +0 -0
  296. {openai_agents-0.0.7 → openai_agents-0.0.8}/tests/voice/test_workflow.py +0 -0

.github/ISSUE_TEMPLATE/feature_request.md
@@ -10,7 +10,7 @@ assignees: ''
 ### Please read this first
 
 - **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/)
-- **Have you searched for related issues?** Others may have had similar requesrs
+- **Have you searched for related issues?** Others may have had similar requests
 
 ### Describe the feature
 What is the feature you're requesting? How would it work? Please provide examples and details if possible.

.github/ISSUE_TEMPLATE/question.md
@@ -10,7 +10,7 @@ assignees: ''
 ### Please read this first
 
 - **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/)
-- **Have you searched for related issues?** Others may have had similar requesrs
+- **Have you searched for related issues?** Others may have had similar requests
 
 ### Question
 Describe your question. Provide details if available.

.vscode/settings.json (new file)
@@ -0,0 +1,7 @@
+{
+    "python.testing.pytestArgs": [
+        "tests"
+    ],
+    "python.testing.unittestEnabled": false,
+    "python.testing.pytestEnabled": true
+}

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.7
+Version: 0.0.8
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,7 +19,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp; python_version >= '3.10'
+Requires-Dist: mcp<2,>=1.6.0; python_version >= '3.10'
 Requires-Dist: openai>=1.66.5
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0

docs/mcp.md
@@ -1,4 +1,4 @@
-# Model context protocol
+# Model context protocol (MCP)
 
 The [Model context protocol](https://modelcontextprotocol.io/introduction) (aka MCP) is a way to provide tools and context to the LLM. From the MCP docs:
 
@@ -46,6 +46,15 @@ Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be
 
 If you want to invalidate the cache, you can call `invalidate_tools_cache()` on the servers.
 
-## End-to-end example
+## End-to-end examples
 
 View complete working examples at [examples/mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp).
+
+## Tracing
+
+[Tracing](./tracing.md) automatically captures MCP operations, including:
+
+1. Calls to the MCP server to list tools
+2. MCP-related info on function calls
+
+![MCP Tracing Screenshot](./assets/images/mcp-tracing.jpg)

docs/running_agents.md
@@ -53,7 +53,7 @@ The `run_config` parameter lets you configure some global settings for the agent
 - [`handoff_input_filter`][agents.run.RunConfig.handoff_input_filter]: A global input filter to apply to all handoffs, if the handoff doesn't already have one. The input filter allows you to edit the inputs that are sent to the new agent. See the documentation in [`Handoff.input_filter`][agents.handoffs.Handoff.input_filter] for more details.
 - [`tracing_disabled`][agents.run.RunConfig.tracing_disabled]: Allows you to disable [tracing](tracing.md) for the entire run.
 - [`trace_include_sensitive_data`][agents.run.RunConfig.trace_include_sensitive_data]: Configures whether traces will include potentially sensitive data, such as LLM and tool call inputs/outputs.
-- [`workflow_name`][agents.run.RunConfig.workflow_name], [`trace_id`][agents.run.RunConfig.trace_id], [`group_id`][agents.run.RunConfig.group_id]: Sets the tracing workflow name, trace ID and trace group ID for the run. We recommend at least setting `workflow_name`. The session ID is an optional field that lets you link traces across multiple runs.
+- [`workflow_name`][agents.run.RunConfig.workflow_name], [`trace_id`][agents.run.RunConfig.trace_id], [`group_id`][agents.run.RunConfig.group_id]: Sets the tracing workflow name, trace ID and trace group ID for the run. We recommend at least setting `workflow_name`. The group ID is an optional field that lets you link traces across multiple runs.
 - [`trace_metadata`][agents.run.RunConfig.trace_metadata]: Metadata to include on all traces.
 
 ## Conversations/chat threads

docs/tracing.md
@@ -101,7 +101,8 @@ To customize this default setup, to send traces to alternative or additional bac
 
 - [Weights & Biases](https://weave-docs.wandb.ai/guides/integrations/openai_agents)
 - [Arize-Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)
-- [MLflow](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)
+- [MLflow (self-hosted/OSS](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)
+- [MLflow (Databricks hosted](https://docs.databricks.com/aws/en/mlflow/mlflow-tracing#-automatic-tracing)
 - [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
 - [Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents)
 - [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)
@@ -111,3 +112,5 @@ To customize this default setup, to send traces to alternative or additional bac
 - [Maxim AI](https://www.getmaxim.ai/docs/observe/integrations/openai-agents-sdk)
 - [Comet Opik](https://www.comet.com/docs/opik/tracing/integrations/openai_agents)
 - [Langfuse](https://langfuse.com/docs/integrations/openaiagentssdk/openai-agents)
+- [Langtrace](https://docs.langtrace.ai/supported-integrations/llm-frameworks/openai-agents-sdk)
+- [Okahu-Monocle](https://github.com/monocle2ai/monocle)

examples/financial_research_agent/manager.py
@@ -38,7 +38,7 @@ class FinancialResearchManager:
         with trace("Financial research trace", trace_id=trace_id):
             self.printer.update_item(
                 "trace_id",
-                f"View trace: https://platform.openai.com/traces/{trace_id}",
+                f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}",
                 is_done=True,
                 hide_checkmark=True,
             )

examples/mcp/filesystem_example/README.md
@@ -5,12 +5,12 @@ This example uses the [filesystem MCP server](https://github.com/modelcontextpro
 Run it via:
 
 ```
-uv run python python examples/mcp/filesystem_example/main.py
+uv run python examples/mcp/filesystem_example/main.py
 ```
 
 ## Details
 
-The example uses the `MCPServerStdio` class from `agents`, with the command:
+The example uses the `MCPServerStdio` class from `agents.mcp`, with the command:
 
 ```bash
 npx -y "@modelcontextprotocol/server-filesystem" <samples_directory>

examples/mcp/filesystem_example/main.py
@@ -45,7 +45,7 @@ async def main():
     ) as server:
         trace_id = gen_trace_id()
         with trace(workflow_name="MCP Filesystem Example", trace_id=trace_id):
-            print(f"View trace: https://platform.openai.com/traces/{trace_id}\n")
+            print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n")
             await run(server)
 
 

examples/mcp/git_example/README.md
@@ -10,16 +10,17 @@ uv run python examples/mcp/git_example/main.py
 
 ## Details
 
-The example uses the `MCPServerStdio` class from `agents`, with the command:
+The example uses the `MCPServerStdio` class from `agents.mcp`, with the command:
 
 ```bash
 uvx mcp-server-git
 ```
+
 Prior to running the agent, the user is prompted to provide a local directory path to their git repo. Using that, the Agent can invoke Git MCP tools like `git_log` to inspect the git commit log.
 
 Under the hood:
 
 1. The server is spun up in a subprocess, and exposes a bunch of tools like `git_log()`
 2. We add the server instance to the Agent via `mcp_agents`.
-3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. The result is cached.
+3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. The result is cached.
 4. If the LLM chooses to use an MCP tool, we call the MCP server to run the tool via `server.run_tool()`.

examples/mcp/git_example/main.py
@@ -30,12 +30,8 @@ async def main():
     directory_path = input("Please enter the path to the git repository: ")
 
     async with MCPServerStdio(
-        params={
-            "command": "uvx",
-            "args": [
-                "mcp-server-git"
-            ]
-        }
+        cache_tools_list=True,  # Cache the tools list, for demonstration
+        params={"command": "uvx", "args": ["mcp-server-git"]},
     ) as server:
         with trace(workflow_name="MCP Git Example"):
             await run(server, directory_path)

examples/mcp/sse_example/README.md (new file)
@@ -0,0 +1,13 @@
+# MCP SSE Example
+
+This example uses a local SSE server in [server.py](server.py).
+
+Run the example via:
+
+```
+uv run python examples/mcp/sse_example/main.py
+```
+
+## Details
+
+The example uses the `MCPServerSse` class from `agents.mcp`. The server runs in a sub-process at `https://localhost:8000/sse`.

examples/mcp/sse_example/main.py (new file)
@@ -0,0 +1,83 @@
+import asyncio
+import os
+import shutil
+import subprocess
+import time
+from typing import Any
+
+from agents import Agent, Runner, gen_trace_id, trace
+from agents.mcp import MCPServer, MCPServerSse
+from agents.model_settings import ModelSettings
+
+
+async def run(mcp_server: MCPServer):
+    agent = Agent(
+        name="Assistant",
+        instructions="Use the tools to answer the questions.",
+        mcp_servers=[mcp_server],
+        model_settings=ModelSettings(tool_choice="required"),
+    )
+
+    # Use the `add` tool to add two numbers
+    message = "Add these numbers: 7 and 22."
+    print(f"Running: {message}")
+    result = await Runner.run(starting_agent=agent, input=message)
+    print(result.final_output)
+
+    # Run the `get_weather` tool
+    message = "What's the weather in Tokyo?"
+    print(f"\n\nRunning: {message}")
+    result = await Runner.run(starting_agent=agent, input=message)
+    print(result.final_output)
+
+    # Run the `get_secret_word` tool
+    message = "What's the secret word?"
+    print(f"\n\nRunning: {message}")
+    result = await Runner.run(starting_agent=agent, input=message)
+    print(result.final_output)
+
+
+async def main():
+    async with MCPServerSse(
+        name="SSE Python Server",
+        params={
+            "url": "http://localhost:8000/sse",
+        },
+    ) as server:
+        trace_id = gen_trace_id()
+        with trace(workflow_name="SSE Example", trace_id=trace_id):
+            print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n")
+            await run(server)
+
+
+if __name__ == "__main__":
+    # Let's make sure the user has uv installed
+    if not shutil.which("uv"):
+        raise RuntimeError(
+            "uv is not installed. Please install it: https://docs.astral.sh/uv/getting-started/installation/"
+        )
+
+    # We'll run the SSE server in a subprocess. Usually this would be a remote server, but for this
+    # demo, we'll run it locally at http://localhost:8000/sse
+    process: subprocess.Popen[Any] | None = None
+    try:
+        this_dir = os.path.dirname(os.path.abspath(__file__))
+        server_file = os.path.join(this_dir, "server.py")
+
+        print("Starting SSE server at http://localhost:8000/sse ...")
+
+        # Run `uv run server.py` to start the SSE server
+        process = subprocess.Popen(["uv", "run", server_file])
+        # Give it 3 seconds to start
+        time.sleep(3)
+
+        print("SSE server started. Running example...\n\n")
+    except Exception as e:
+        print(f"Error starting SSE server: {e}")
+        exit(1)
+
+    try:
+        asyncio.run(main())
+    finally:
+        if process:
+            process.terminate()

examples/mcp/sse_example/server.py (new file)
@@ -0,0 +1,33 @@
+import random
+
+import requests
+from mcp.server.fastmcp import FastMCP
+
+# Create server
+mcp = FastMCP("Echo Server")
+
+
+@mcp.tool()
+def add(a: int, b: int) -> int:
+    """Add two numbers"""
+    print(f"[debug-server] add({a}, {b})")
+    return a + b
+
+
+@mcp.tool()
+def get_secret_word() -> str:
+    print("[debug-server] get_secret_word()")
+    return random.choice(["apple", "banana", "cherry"])
+
+
+@mcp.tool()
+def get_current_weather(city: str) -> str:
+    print(f"[debug-server] get_current_weather({city})")
+
+    endpoint = "https://wttr.in"
+    response = requests.get(f"{endpoint}/{city}")
+    return response.text
+
+
+if __name__ == "__main__":
+    mcp.run(transport="sse")

examples/research_bot/manager.py
@@ -23,7 +23,7 @@ class ResearchManager:
         with trace("Research trace", trace_id=trace_id):
             self.printer.update_item(
                 "trace_id",
-                f"View trace: https://platform.openai.com/traces/{trace_id}",
+                f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}",
                 is_done=True,
                 hide_checkmark=True,
             )

examples/research_bot/sample_outputs/product_recs.txt
@@ -3,7 +3,7 @@
 $ uv run python -m examples.research_bot.main
 
 What would you like to research? Best surfboards for beginners. I can catch my own waves, but previously used an 11ft board. What should I look for, what are my options? Various budget ranges.
-View trace: https://platform.openai.com/traces/trace_...
+View trace: https://platform.openai.com/traces/trace?trace_id=trace_...
 Starting research...
 ✅ Will perform 15 searches
 ✅ Searching... 15/15 completed

examples/research_bot/sample_outputs/vacation.txt
@@ -2,7 +2,7 @@
 
 $ uv run python -m examples.research_bot.main
 What would you like to research? Caribbean vacation spots in April, optimizing for surfing, hiking and water sports
-View trace: https://platform.openai.com/traces/trace_....
+View trace: https://platform.openai.com/traces/trace?trace_id=trace_....
 Starting research...
 ✅ Will perform 15 searches
 ✅ Searching... 15/15 completed

pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai-agents"
-version = "0.0.7"
+version = "0.0.8"
 description = "OpenAI Agents SDK"
 readme = "README.md"
 requires-python = ">=3.9"
@@ -13,7 +13,7 @@ dependencies = [
     "typing-extensions>=4.12.2, <5",
     "requests>=2.0, <3",
     "types-requests>=2.0, <3",
-    "mcp; python_version >= '3.10'",
+    "mcp>=1.6.0, <2; python_version >= '3.10'",
 ]
 classifiers = [
     "Typing :: Typed",
@@ -54,7 +54,6 @@ dev = [
     "pynput",
     "types-pynput",
     "sounddevice",
-    "pynput",
     "textual",
     "websockets",
     "graphviz",

src/agents/__init__.py
@@ -100,6 +100,7 @@ from .tracing import (
     transcription_span,
 )
 from .usage import Usage
+from .version import __version__
 
 
 def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None:
@@ -247,4 +248,5 @@ __all__ = [
     "gen_trace_id",
     "gen_span_id",
     "default_tool_error_function",
+    "__version__",
 ]

src/agents/agent.py
@@ -6,7 +6,7 @@ from collections.abc import Awaitable
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast
 
-from typing_extensions import TypeAlias, TypedDict
+from typing_extensions import NotRequired, TypeAlias, TypedDict
 
 from .guardrail import InputGuardrail, OutputGuardrail
 from .handoffs import Handoff
@@ -44,7 +44,7 @@ ToolsToFinalOutputFunction: TypeAlias = Callable[
     MaybeAwaitable[ToolsToFinalOutputResult],
 ]
 """A function that takes a run context and a list of tool results, and returns a
-`ToolToFinalOutputResult`.
+`ToolsToFinalOutputResult`.
 """
 
 
@@ -53,6 +53,15 @@ class StopAtTools(TypedDict):
     """A list of tool names, any of which will stop the agent from running further."""
 
 
+class MCPConfig(TypedDict):
+    """Configuration for MCP servers."""
+
+    convert_schemas_to_strict: NotRequired[bool]
+    """If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a
+    best-effort conversion, so some schemas may not be convertible. Defaults to False.
+    """
+
+
 @dataclass
 class Agent(Generic[TContext]):
     """An agent is an AI model configured with instructions, tools, guardrails, handoffs and more.
@@ -119,6 +128,9 @@ class Agent(Generic[TContext]):
     longer needed.
     """
 
+    mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig())
+    """Configuration for MCP servers."""
+
     input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list)
     """A list of checks that run in parallel to the agent's execution, before generating a
     response. Runs only if the agent is the first agent in the chain.
@@ -224,7 +236,8 @@ class Agent(Generic[TContext]):
 
     async def get_mcp_tools(self) -> list[Tool]:
         """Fetches the available tools from the MCP servers."""
-        return await MCPUtil.get_all_function_tools(self.mcp_servers)
+        convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
+        return await MCPUtil.get_all_function_tools(self.mcp_servers, convert_schemas_to_strict)
 
     async def get_all_tools(self) -> list[Tool]:
         """All agent tools, including MCP tools and function tools."""

src/agents/mcp/util.py
@@ -2,6 +2,8 @@ import functools
 import json
 from typing import TYPE_CHECKING, Any
 
+from agents.strict_schema import ensure_strict_json_schema
+
 from .. import _debug
 from ..exceptions import AgentsException, ModelBehaviorError, UserError
 from ..logger import logger
@@ -19,12 +21,14 @@ class MCPUtil:
     """Set of utilities for interop between MCP and Agents SDK tools."""
 
     @classmethod
-    async def get_all_function_tools(cls, servers: list["MCPServer"]) -> list[Tool]:
+    async def get_all_function_tools(
+        cls, servers: list["MCPServer"], convert_schemas_to_strict: bool
+    ) -> list[Tool]:
         """Get all function tools from a list of MCP servers."""
         tools = []
         tool_names: set[str] = set()
         for server in servers:
-            server_tools = await cls.get_function_tools(server)
+            server_tools = await cls.get_function_tools(server, convert_schemas_to_strict)
             server_tool_names = {tool.name for tool in server_tools}
             if len(server_tool_names & tool_names) > 0:
                 raise UserError(
@@ -37,25 +41,37 @@ class MCPUtil:
         return tools
 
     @classmethod
-    async def get_function_tools(cls, server: "MCPServer") -> list[Tool]:
+    async def get_function_tools(
+        cls, server: "MCPServer", convert_schemas_to_strict: bool
+    ) -> list[Tool]:
         """Get all function tools from a single MCP server."""
 
         with mcp_tools_span(server=server.name) as span:
            tools = await server.list_tools()
            span.span_data.result = [tool.name for tool in tools]
 
-        return [cls.to_function_tool(tool, server) for tool in tools]
+        return [cls.to_function_tool(tool, server, convert_schemas_to_strict) for tool in tools]
 
     @classmethod
-    def to_function_tool(cls, tool: "MCPTool", server: "MCPServer") -> FunctionTool:
+    def to_function_tool(
+        cls, tool: "MCPTool", server: "MCPServer", convert_schemas_to_strict: bool
+    ) -> FunctionTool:
         """Convert an MCP tool to an Agents SDK function tool."""
         invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool)
+        schema, is_strict = tool.inputSchema, False
+        if convert_schemas_to_strict:
+            try:
+                schema = ensure_strict_json_schema(schema)
+                is_strict = True
+            except Exception as e:
+                logger.info(f"Error converting MCP schema to strict mode: {e}")
+
         return FunctionTool(
             name=tool.name,
             description=tool.description or "",
-            params_json_schema=tool.inputSchema,
+            params_json_schema=schema,
             on_invoke_tool=invoke_func,
-            strict_json_schema=False,
+            strict_json_schema=is_strict,
         )
 
     @classmethod
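
For context, the strict-mode conversion in `to_function_tool` above is just a guarded call to `ensure_strict_json_schema`. A standalone sketch of that fallback logic, using a made-up tool schema rather than one returned by `server.list_tools()`:

```python
from agents.strict_schema import ensure_strict_json_schema

# Made-up MCP tool input schema; real ones come from the MCP server.
schema = {"type": "object", "properties": {"city": {"type": "string"}}}

try:
    # Attempt the best-effort conversion, mirroring MCPUtil.to_function_tool.
    schema = ensure_strict_json_schema(schema)
    is_strict = True
except Exception:
    # On failure the original schema is kept and the tool stays non-strict.
    is_strict = False

print(is_strict, schema)
```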

src/agents/model_settings.py
@@ -1,8 +1,10 @@
 from __future__ import annotations
 
-from dataclasses import dataclass
+from dataclasses import dataclass, fields, replace
 from typing import Literal
 
+from openai.types.shared import Reasoning
+
 
 @dataclass
 class ModelSettings:
@@ -30,8 +32,9 @@ class ModelSettings:
     tool_choice: Literal["auto", "required", "none"] | str | None = None
     """The tool choice to use when calling the model."""
 
-    parallel_tool_calls: bool | None = False
-    """Whether to use parallel tool calls when calling the model."""
+    parallel_tool_calls: bool | None = None
+    """Whether to use parallel tool calls when calling the model.
+    Defaults to False if not provided."""
 
     truncation: Literal["auto", "disabled"] | None = None
     """The truncation strategy to use when calling the model."""
@@ -39,18 +42,27 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
 
+    reasoning: Reasoning | None = None
+    """Configuration options for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: dict[str, str] | None = None
+    """Metadata to include with the model response call."""
+
+    store: bool | None = None
+    """Whether to store the generated model response for later retrieval.
+    Defaults to True if not provided."""
+
     def resolve(self, override: ModelSettings | None) -> ModelSettings:
         """Produce a new ModelSettings by overlaying any non-None values from the
         override on top of this instance."""
         if override is None:
             return self
-        return ModelSettings(
-            temperature=override.temperature or self.temperature,
-            top_p=override.top_p or self.top_p,
-            frequency_penalty=override.frequency_penalty or self.frequency_penalty,
-            presence_penalty=override.presence_penalty or self.presence_penalty,
-            tool_choice=override.tool_choice or self.tool_choice,
-            parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
-            truncation=override.truncation or self.truncation,
-            max_tokens=override.max_tokens or self.max_tokens,
-        )
+
+        changes = {
+            field.name: getattr(override, field.name)
+            for field in fields(self)
+            if getattr(override, field.name) is not None
+        }
+        return replace(self, **changes)
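
A quick sketch of how the rewritten `resolve()` overlays settings, including the new `reasoning`, `metadata`, and `store` fields (the values here are illustrative):

```python
from agents.model_settings import ModelSettings
from openai.types.shared import Reasoning

base = ModelSettings(temperature=0.3, store=True)
override = ModelSettings(
    reasoning=Reasoning(effort="low"),
    metadata={"run": "demo"},  # illustrative metadata
)

# Any field that is not None on the override replaces the base value;
# everything else (temperature, store, ...) is carried over unchanged.
resolved = base.resolve(override)
print(resolved)
```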

src/agents/models/openai_chatcompletions.py
@@ -518,6 +518,11 @@ class OpenAIChatCompletionsModel(Model):
                 f"Response format: {response_format}\n"
             )
 
+        # Match the behavior of Responses where store is True when not given
+        store = model_settings.store if model_settings.store is not None else True
+
+        reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
@@ -532,7 +537,10 @@ class OpenAIChatCompletionsModel(Model):
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
+            store=store,
+            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
             extra_headers=_HEADERS,
+            metadata=model_settings.metadata,
         )
 
         if isinstance(ret, ChatCompletion):
@@ -551,6 +559,7 @@ class OpenAIChatCompletionsModel(Model):
             temperature=model_settings.temperature,
             tools=[],
             parallel_tool_calls=parallel_tool_calls or False,
+            reasoning=model_settings.reasoning,
         )
         return response, ret
 
@@ -919,12 +928,13 @@ class _Converter:
         elif func_call := cls.maybe_function_tool_call(item):
             asst = ensure_assistant_message()
             tool_calls = list(asst.get("tool_calls", []))
+            arguments = func_call["arguments"] if func_call["arguments"] else "{}"
             new_tool_call = ChatCompletionMessageToolCallParam(
                 id=func_call["call_id"],
                 type="function",
                 function={
                     "name": func_call["name"],
-                    "arguments": func_call["arguments"],
+                    "arguments": arguments,
                 },
             )
             tool_calls.append(new_tool_call)
@@ -967,7 +977,7 @@ class ToolConverter:
            }
 
        raise UserError(
-            f"Hosted tools are not supported with the ChatCompletions API. FGot tool type: "
+            f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
            f"{type(tool)}, tool: {tool}"
        )
 

src/agents/models/openai_responses.py
@@ -246,6 +246,9 @@ class OpenAIResponsesModel(Model):
             stream=stream,
             extra_headers=_HEADERS,
             text=response_format,
+            store=self._non_null_or_not_given(model_settings.store),
+            reasoning=self._non_null_or_not_given(model_settings.reasoning),
+            metadata=model_settings.metadata,
         )
 
     def _get_client(self) -> AsyncOpenAI:

src/agents/strict_schema.py
@@ -54,7 +54,7 @@ def _ensure_strict_json_schema(
     elif (
         typ == "object"
         and "additionalProperties" in json_schema
-        and json_schema["additionalProperties"] is True
+        and json_schema["additionalProperties"]
     ):
         raise UserError(
             "additionalProperties should not be set for object types. This could be because "

src/agents/tracing/processors.py
@@ -182,7 +182,6 @@ class BatchTraceProcessor(TracingProcessor):
         # Track when we next *must* perform a scheduled export
         self._next_export_time = time.time() + self._schedule_delay
 
-        self._shutdown_event = threading.Event()
         self._worker_thread = threading.Thread(target=self._run, daemon=True)
         self._worker_thread.start()