openai-agents 0.0.6__tar.gz → 0.0.7__tar.gz

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of openai-agents might be problematic.

Files changed (290)
  1. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/workflows/issues.yml +6 -3
  2. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/workflows/tests.yml +3 -0
  3. {openai_agents-0.0.6 → openai_agents-0.0.7}/.gitignore +2 -2
  4. {openai_agents-0.0.6 → openai_agents-0.0.7}/Makefile +1 -1
  5. {openai_agents-0.0.6 → openai_agents-0.0.7}/PKG-INFO +5 -2
  6. {openai_agents-0.0.6 → openai_agents-0.0.7}/README.md +1 -1
  7. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/agents.md +3 -1
  8. openai_agents-0.0.7/docs/assets/images/graph.png +0 -0
  9. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/context.md +3 -3
  10. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/guardrails.md +1 -1
  11. openai_agents-0.0.7/docs/mcp.md +51 -0
  12. openai_agents-0.0.7/docs/ref/mcp/server.md +3 -0
  13. openai_agents-0.0.7/docs/ref/mcp/util.md +3 -0
  14. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/tracing.md +10 -1
  15. openai_agents-0.0.7/docs/visualization.md +86 -0
  16. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/voice/quickstart.md +10 -5
  17. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/lifecycle_example.py +1 -1
  18. openai_agents-0.0.7/examples/mcp/filesystem_example/README.md +26 -0
  19. openai_agents-0.0.7/examples/mcp/filesystem_example/main.py +57 -0
  20. openai_agents-0.0.7/examples/mcp/filesystem_example/sample_files/favorite_books.txt +20 -0
  21. openai_agents-0.0.7/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +4 -0
  22. openai_agents-0.0.7/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +10 -0
  23. openai_agents-0.0.7/examples/mcp/git_example/README.md +25 -0
  24. openai_agents-0.0.7/examples/mcp/git_example/main.py +48 -0
  25. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/agents/search_agent.py +1 -1
  26. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/static/main.py +5 -0
  27. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/static/util.py +1 -0
  28. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/streamed/main.py +13 -1
  29. {openai_agents-0.0.6 → openai_agents-0.0.7}/mkdocs.yml +6 -0
  30. {openai_agents-0.0.6 → openai_agents-0.0.7}/pyproject.toml +5 -1
  31. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/__init__.py +4 -0
  32. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/_run_impl.py +56 -6
  33. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/agent.py +25 -0
  34. openai_agents-0.0.7/src/agents/extensions/visualization.py +137 -0
  35. openai_agents-0.0.7/src/agents/mcp/__init__.py +21 -0
  36. openai_agents-0.0.7/src/agents/mcp/server.py +301 -0
  37. openai_agents-0.0.7/src/agents/mcp/util.py +115 -0
  38. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/openai_chatcompletions.py +1 -1
  39. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/openai_responses.py +6 -2
  40. openai_agents-0.0.7/src/agents/py.typed +1 -0
  41. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/run.py +45 -7
  42. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/__init__.py +4 -0
  43. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/create.py +29 -0
  44. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/processors.py +26 -7
  45. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/span_data.py +32 -2
  46. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/imports.py +1 -1
  47. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/models/openai_stt.py +1 -2
  48. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/fake_model.py +10 -0
  49. openai_agents-0.0.7/tests/mcp/conftest.py +11 -0
  50. openai_agents-0.0.7/tests/mcp/helpers.py +58 -0
  51. openai_agents-0.0.7/tests/mcp/test_caching.py +57 -0
  52. openai_agents-0.0.7/tests/mcp/test_connect_disconnect.py +69 -0
  53. openai_agents-0.0.7/tests/mcp/test_mcp_tracing.py +198 -0
  54. openai_agents-0.0.7/tests/mcp/test_mcp_util.py +109 -0
  55. openai_agents-0.0.7/tests/mcp/test_runner_calls_mcp.py +197 -0
  56. openai_agents-0.0.7/tests/mcp/test_server_errors.py +42 -0
  57. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_agent_runner_streamed.py +1 -1
  58. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_agent_tracing.py +148 -62
  59. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_global_hooks.py +2 -2
  60. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_responses_tracing.py +8 -25
  61. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_run_step_execution.py +1 -0
  62. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_run_step_processing.py +78 -22
  63. openai_agents-0.0.7/tests/test_tool_choice_reset.py +210 -0
  64. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_tracing.py +143 -142
  65. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_tracing_errors.py +26 -89
  66. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_tracing_errors_streamed.py +12 -171
  67. openai_agents-0.0.7/tests/test_visualization.py +136 -0
  68. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/testing_processor.py +23 -5
  69. openai_agents-0.0.7/tests/tracing/test_processor_api_key.py +27 -0
  70. openai_agents-0.0.7/tests/voice/__init__.py +0 -0
  71. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/test_openai_stt.py +4 -6
  72. {openai_agents-0.0.6 → openai_agents-0.0.7}/uv.lock +107 -3
  73. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  74. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
  75. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
  76. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/ISSUE_TEMPLATE/question.md +0 -0
  77. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
  78. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/workflows/docs.yml +0 -0
  79. {openai_agents-0.0.6 → openai_agents-0.0.7}/.github/workflows/publish.yml +0 -0
  80. {openai_agents-0.0.6 → openai_agents-0.0.7}/.prettierrc +0 -0
  81. {openai_agents-0.0.6 → openai_agents-0.0.7}/LICENSE +0 -0
  82. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/assets/images/favicon-platform.svg +0 -0
  83. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/assets/images/orchestration.png +0 -0
  84. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/assets/logo.svg +0 -0
  85. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/config.md +0 -0
  86. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/examples.md +0 -0
  87. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/handoffs.md +0 -0
  88. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/index.md +0 -0
  89. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/models.md +0 -0
  90. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/multi_agent.md +0 -0
  91. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/quickstart.md +0 -0
  92. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/agent.md +0 -0
  93. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/agent_output.md +0 -0
  94. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/exceptions.md +0 -0
  95. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/extensions/handoff_filters.md +0 -0
  96. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/extensions/handoff_prompt.md +0 -0
  97. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/function_schema.md +0 -0
  98. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/guardrail.md +0 -0
  99. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/handoffs.md +0 -0
  100. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/index.md +0 -0
  101. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/items.md +0 -0
  102. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/lifecycle.md +0 -0
  103. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/model_settings.md +0 -0
  104. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/models/interface.md +0 -0
  105. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/models/openai_chatcompletions.md +0 -0
  106. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/models/openai_responses.md +0 -0
  107. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/result.md +0 -0
  108. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/run.md +0 -0
  109. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/run_context.md +0 -0
  110. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/stream_events.md +0 -0
  111. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tool.md +0 -0
  112. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/create.md +0 -0
  113. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/index.md +0 -0
  114. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/processor_interface.md +0 -0
  115. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/processors.md +0 -0
  116. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/scope.md +0 -0
  117. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/setup.md +0 -0
  118. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/span_data.md +0 -0
  119. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/spans.md +0 -0
  120. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/traces.md +0 -0
  121. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/tracing/util.md +0 -0
  122. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/usage.md +0 -0
  123. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/events.md +0 -0
  124. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/exceptions.md +0 -0
  125. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/input.md +0 -0
  126. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/model.md +0 -0
  127. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/models/openai_provider.md +0 -0
  128. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/models/openai_stt.md +0 -0
  129. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/models/openai_tts.md +0 -0
  130. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/pipeline.md +0 -0
  131. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/pipeline_config.md +0 -0
  132. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/result.md +0 -0
  133. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/utils.md +0 -0
  134. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/ref/voice/workflow.md +0 -0
  135. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/results.md +0 -0
  136. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/running_agents.md +0 -0
  137. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/streaming.md +0 -0
  138. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/stylesheets/extra.css +0 -0
  139. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/tools.md +0 -0
  140. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/voice/pipeline.md +0 -0
  141. {openai_agents-0.0.6 → openai_agents-0.0.7}/docs/voice/tracing.md +0 -0
  142. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/__init__.py +0 -0
  143. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/README.md +0 -0
  144. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/agents_as_tools.py +0 -0
  145. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/deterministic.py +0 -0
  146. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/forcing_tool_use.py +0 -0
  147. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/input_guardrails.py +0 -0
  148. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/llm_as_a_judge.py +0 -0
  149. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/output_guardrails.py +0 -0
  150. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/parallelization.py +0 -0
  151. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/agent_patterns/routing.py +0 -0
  152. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/agent_lifecycle_example.py +0 -0
  153. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/dynamic_system_prompt.py +0 -0
  154. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/hello_world.py +0 -0
  155. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/hello_world_jupyter.py +0 -0
  156. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/stream_items.py +0 -0
  157. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/stream_text.py +0 -0
  158. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/basic/tools.py +0 -0
  159. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/customer_service/main.py +0 -0
  160. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/README.md +0 -0
  161. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/__init__.py +0 -0
  162. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/__init__.py +0 -0
  163. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/financials_agent.py +0 -0
  164. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/planner_agent.py +0 -0
  165. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/risk_agent.py +0 -0
  166. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/search_agent.py +0 -0
  167. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
  168. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/agents/writer_agent.py +0 -0
  169. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/main.py +0 -0
  170. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/manager.py +0 -0
  171. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/financial_research_agent/printer.py +0 -0
  172. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/handoffs/message_filter.py +0 -0
  173. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/handoffs/message_filter_streaming.py +0 -0
  174. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/model_providers/README.md +0 -0
  175. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/model_providers/custom_example_agent.py +0 -0
  176. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/model_providers/custom_example_global.py +0 -0
  177. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/model_providers/custom_example_provider.py +0 -0
  178. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/README.md +0 -0
  179. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/__init__.py +0 -0
  180. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/agents/__init__.py +0 -0
  181. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/agents/planner_agent.py +0 -0
  182. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/agents/writer_agent.py +0 -0
  183. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/main.py +0 -0
  184. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/manager.py +0 -0
  185. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/printer.py +0 -0
  186. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/sample_outputs/product_recs.md +0 -0
  187. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
  188. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/sample_outputs/vacation.md +0 -0
  189. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/research_bot/sample_outputs/vacation.txt +0 -0
  190. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/tools/computer_use.py +0 -0
  191. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/tools/file_search.py +0 -0
  192. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/tools/web_search.py +0 -0
  193. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/__init__.py +0 -0
  194. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/static/README.md +0 -0
  195. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/static/__init__.py +0 -0
  196. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/streamed/README.md +0 -0
  197. {openai_agents-0.0.6 → openai_agents-0.0.7}/examples/voice/streamed/__init__.py +0 -0
  198. /openai_agents-0.0.6/examples/voice/streamed/agents.py → /openai_agents-0.0.7/examples/voice/streamed/my_workflow.py +0 -0
  199. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/_config.py +0 -0
  200. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/_debug.py +0 -0
  201. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/agent_output.py +0 -0
  202. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/computer.py +0 -0
  203. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/exceptions.py +0 -0
  204. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/extensions/__init__.py +0 -0
  205. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/extensions/handoff_filters.py +0 -0
  206. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/extensions/handoff_prompt.py +0 -0
  207. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/function_schema.py +0 -0
  208. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/guardrail.py +0 -0
  209. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/handoffs.py +0 -0
  210. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/items.py +0 -0
  211. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/lifecycle.py +0 -0
  212. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/logger.py +0 -0
  213. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/model_settings.py +0 -0
  214. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/__init__.py +0 -0
  215. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/_openai_shared.py +0 -0
  216. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/fake_id.py +0 -0
  217. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/interface.py +0 -0
  218. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/models/openai_provider.py +0 -0
  219. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/result.py +0 -0
  220. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/run_context.py +0 -0
  221. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/stream_events.py +0 -0
  222. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/strict_schema.py +0 -0
  223. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tool.py +0 -0
  224. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/logger.py +0 -0
  225. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/processor_interface.py +0 -0
  226. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/scope.py +0 -0
  227. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/setup.py +0 -0
  228. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/spans.py +0 -0
  229. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/traces.py +0 -0
  230. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/tracing/util.py +0 -0
  231. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/usage.py +0 -0
  232. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/__init__.py +0 -0
  233. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/_coro.py +0 -0
  234. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/_error_tracing.py +0 -0
  235. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/_json.py +0 -0
  236. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/_pretty_print.py +0 -0
  237. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/_transforms.py +0 -0
  238. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/util/_types.py +0 -0
  239. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/version.py +0 -0
  240. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/__init__.py +0 -0
  241. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/events.py +0 -0
  242. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/exceptions.py +0 -0
  243. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/input.py +0 -0
  244. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/model.py +0 -0
  245. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/models/__init__.py +0 -0
  246. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/models/openai_model_provider.py +0 -0
  247. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/models/openai_tts.py +0 -0
  248. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/pipeline.py +0 -0
  249. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/pipeline_config.py +0 -0
  250. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/result.py +0 -0
  251. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/utils.py +0 -0
  252. {openai_agents-0.0.6 → openai_agents-0.0.7}/src/agents/voice/workflow.py +0 -0
  253. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/README.md +0 -0
  254. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/__init__.py +0 -0
  255. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/conftest.py +0 -0
  256. {openai_agents-0.0.6/tests/voice → openai_agents-0.0.7/tests/mcp}/__init__.py +0 -0
  257. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_agent_config.py +0 -0
  258. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_agent_hooks.py +0 -0
  259. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_agent_runner.py +0 -0
  260. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_computer_action.py +0 -0
  261. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_config.py +0 -0
  262. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_doc_parsing.py +0 -0
  263. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_extension_filters.py +0 -0
  264. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_function_schema.py +0 -0
  265. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_function_tool.py +0 -0
  266. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_function_tool_decorator.py +0 -0
  267. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_guardrails.py +0 -0
  268. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_handoff_tool.py +0 -0
  269. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_items_helpers.py +0 -0
  270. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_max_turns.py +0 -0
  271. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_openai_chatcompletions.py +0 -0
  272. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_openai_chatcompletions_converter.py +0 -0
  273. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_openai_chatcompletions_stream.py +0 -0
  274. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_openai_responses_converter.py +0 -0
  275. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_output_tool.py +0 -0
  276. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_pretty_print.py +0 -0
  277. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_responses.py +0 -0
  278. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_result_cast.py +0 -0
  279. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_run_config.py +0 -0
  280. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_strict_schema.py +0 -0
  281. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_tool_converter.py +0 -0
  282. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_tool_use_behavior.py +0 -0
  283. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/test_trace_processor.py +0 -0
  284. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/conftest.py +0 -0
  285. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/fake_models.py +0 -0
  286. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/helpers.py +0 -0
  287. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/test_input.py +0 -0
  288. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/test_openai_tts.py +0 -0
  289. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/test_pipeline.py +0 -0
  290. {openai_agents-0.0.6 → openai_agents-0.0.7}/tests/voice/test_workflow.py +0 -0

.github/workflows/issues.yml:
@@ -17,7 +17,10 @@ jobs:
  stale-issue-label: "stale"
  stale-issue-message: "This issue is stale because it has been open for 7 days with no activity."
  close-issue-message: "This issue was closed because it has been inactive for 3 days since being marked as stale."
- days-before-pr-stale: -1
- days-before-pr-close: -1
- any-of-labels: 'question,needs-more-info'
+ any-of-issue-labels: 'question,needs-more-info'
+ days-before-pr-stale: 10
+ days-before-pr-close: 7
+ stale-pr-label: "stale"
+ stale-pr-message: "This PR is stale because it has been open for 10 days with no activity."
+ close-pr-message: "This PR was closed because it has been inactive for 7 days since being marked as stale."
  repo-token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/tests.yml:
@@ -8,6 +8,9 @@ on:
  branches:
  - main

+ env:
+ UV_FROZEN: "1"
+
  jobs:
  lint:
  runs-on: ubuntu-latest

.gitignore:
@@ -135,10 +135,10 @@ dmypy.json
  cython_debug/

  # PyCharm
- #.idea/
+ .idea/

  # Ruff stuff:
  .ruff_cache/

  # PyPI configuration file
- .pypirc
+ .pypirc

Makefile:
@@ -5,6 +5,7 @@ sync:
  .PHONY: format
  format:
  uv run ruff format
+ uv run ruff check --fix

  .PHONY: lint
  lint:
@@ -36,7 +37,6 @@ snapshots-create:
  .PHONY: old_version_tests
  old_version_tests:
  UV_PROJECT_ENVIRONMENT=.venv_39 uv run --python 3.9 -m pytest
- UV_PROJECT_ENVIRONMENT=.venv_39 uv run --python 3.9 -m mypy .

  .PHONY: build-docs
  build-docs:

PKG-INFO:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openai-agents
- Version: 0.0.6
+ Version: 0.0.7
  Summary: OpenAI Agents SDK
  Project-URL: Homepage, https://github.com/openai/openai-agents-python
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,11 +19,14 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Classifier: Typing :: Typed
  Requires-Python: >=3.9
  Requires-Dist: griffe<2,>=1.5.6
+ Requires-Dist: mcp; python_version >= '3.10'
  Requires-Dist: openai>=1.66.5
  Requires-Dist: pydantic<3,>=2.10
  Requires-Dist: requests<3,>=2.0
  Requires-Dist: types-requests<3,>=2.0
  Requires-Dist: typing-extensions<5,>=4.12.2
+ Provides-Extra: viz
+ Requires-Dist: graphviz>=0.17; extra == 'viz'
  Provides-Extra: voice
  Requires-Dist: numpy<3,>=2.2.0; (python_version >= '3.10') and extra == 'voice'
  Requires-Dist: websockets<16,>=15.0; extra == 'voice'
@@ -61,7 +64,7 @@ source env/bin/activate
  pip install openai-agents
  ```

- For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
+ For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.

  ## Hello world example

README.md:
@@ -30,7 +30,7 @@ source env/bin/activate
  pip install openai-agents
  ```

- For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
+ For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.

  ## Hello world example

docs/agents.md:
@@ -142,4 +142,6 @@ Supplying a list of tools doesn't always mean the LLM will use a tool. You can f

  !!! note

- If requiring tool use, you should consider setting [`Agent.tool_use_behavior`] to stop the Agent from running when a tool output is produced. Otherwise, the Agent might run in an infinite loop, where the LLM produces a tool call , and the tool result is sent to the LLM, and this infinite loops because the LLM is always forced to use a tool.
+ To prevent infinite loops, the framework automatically resets `tool_choice` to "auto" after a tool call. This behavior is configurable via [`agent.reset_tool_choice`][agents.agent.Agent.reset_tool_choice]. The infinite loop is because tool results are sent to the LLM, which then generates another tool call because of `tool_choice`, ad infinitum.
+
+ If you want the Agent to completely stop after a tool call (rather than continuing with auto mode), you can set [`Agent.tool_use_behavior="stop_on_first_tool"`] which will directly use the tool output as the final response without further LLM processing.
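
For reference, a minimal sketch of how the settings described in this hunk fit together, assuming `Agent.tool_use_behavior`, `Agent.reset_tool_choice`, and `ModelSettings.tool_choice` keep the names used in the docs above:

```python
from agents import Agent, ModelSettings, Runner, function_tool


@function_tool
def get_weather(city: str) -> str:
    return f"The weather in {city} is sunny."


# tool_choice="required" forces a tool call on the first turn. After the call,
# the SDK resets tool_choice to "auto" (controlled by reset_tool_choice), and
# stop_on_first_tool returns the tool output directly as the final answer.
agent = Agent(
    name="Weather agent",
    instructions="Answer weather questions.",
    tools=[get_weather],
    model_settings=ModelSettings(tool_choice="required"),
    tool_use_behavior="stop_on_first_tool",
)

result = Runner.run_sync(agent, "What's the weather in Tokyo?")
print(result.final_output)  # the raw tool output
```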

docs/context.md:
@@ -41,14 +41,14 @@ async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str: # (2)!
      return f"User {wrapper.context.name} is 47 years old"

  async def main():
-     user_info = UserInfo(name="John", uid=123)  # (3)!
+     user_info = UserInfo(name="John", uid=123)

-     agent = Agent[UserInfo](  # (4)!
+     agent = Agent[UserInfo](  # (3)!
          name="Assistant",
          tools=[fetch_user_age],
      )

-     result = await Runner.run(
+     result = await Runner.run(  # (4)!
          starting_agent=agent,
          input="What is the age of the user?",
          context=user_info,

docs/guardrails.md:
@@ -29,7 +29,7 @@ Output guardrails run in 3 steps:

  !!! Note

- Output guardrails are intended to run on the final agent input, so an agent's guardrails only run if the agent is the *last* agent. Similar to the input guardrails, we do this because guardrails tend to be related to the actual Agent - you'd run different guardrails for different agents, so colocating the code is useful for readability.
+ Output guardrails are intended to run on the final agent output, so an agent's guardrails only run if the agent is the *last* agent. Similar to the input guardrails, we do this because guardrails tend to be related to the actual Agent - you'd run different guardrails for different agents, so colocating the code is useful for readability.

  ## Tripwires

docs/mcp.md (new file):
@@ -0,0 +1,51 @@
+ # Model context protocol
+
+ The [Model context protocol](https://modelcontextprotocol.io/introduction) (aka MCP) is a way to provide tools and context to the LLM. From the MCP docs:
+
+ > MCP is an open protocol that standardizes how applications provide context to LLMs. Think of MCP like a USB-C port for AI applications. Just as USB-C provides a standardized way to connect your devices to various peripherals and accessories, MCP provides a standardized way to connect AI models to different data sources and tools.
+
+ The Agents SDK has support for MCP. This enables you to use a wide range of MCP servers to provide tools to your Agents.
+
+ ## MCP servers
+
+ Currently, the MCP spec defines two kinds of servers, based on the transport mechanism they use:
+
+ 1. **stdio** servers run as a subprocess of your application. You can think of them as running "locally".
+ 2. **HTTP over SSE** servers run remotely. You connect to them via a URL.
+
+ You can use the [`MCPServerStdio`][agents.mcp.server.MCPServerStdio] and [`MCPServerSse`][agents.mcp.server.MCPServerSse] classes to connect to these servers.
+
+ For example, this is how you'd use the [official MCP filesystem server](https://www.npmjs.com/package/@modelcontextprotocol/server-filesystem).
+
+ ```python
+ async with MCPServerStdio(
+     params={
+         "command": "npx",
+         "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
+     }
+ ) as server:
+     tools = await server.list_tools()
+ ```
+
+ ## Using MCP servers
+
+ MCP servers can be added to Agents. The Agents SDK will call `list_tools()` on the MCP servers each time the Agent is run. This makes the LLM aware of the MCP server's tools. When the LLM calls a tool from an MCP server, the SDK calls `call_tool()` on that server.
+
+ ```python
+
+ agent=Agent(
+     name="Assistant",
+     instructions="Use the tools to achieve the task",
+     mcp_servers=[mcp_server_1, mcp_server_2]
+ )
+ ```
+
+ ## Caching
+
+ Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be a latency hit, especially if the server is a remote server. To automatically cache the list of tools, you can pass `cache_tools_list=True` to both [`MCPServerStdio`][agents.mcp.server.MCPServerStdio] and [`MCPServerSse`][agents.mcp.server.MCPServerSse]. You should only do this if you're certain the tool list will not change.
+
+ If you want to invalidate the cache, you can call `invalidate_tools_cache()` on the servers.
+
+ ## End-to-end example
+
+ View complete working examples at [examples/mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp).
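
For reference, a sketch combining the caching and agent wiring described in the new mcp.md, assuming the `MCPServerSse` constructor accepts a `params` dict with a `url` key plus a `cache_tools_list` flag as documented above; the endpoint URL is a placeholder:

```python
import asyncio

from agents import Agent, Runner
from agents.mcp import MCPServerSse


async def main():
    # cache_tools_list=True avoids re-fetching the tool list on every run;
    # only safe if the server's tools are static.
    async with MCPServerSse(
        params={"url": "https://example.com/sse"},  # placeholder endpoint
        cache_tools_list=True,
    ) as server:
        agent = Agent(
            name="Assistant",
            instructions="Use the tools to achieve the task",
            mcp_servers=[server],
        )
        result = await Runner.run(starting_agent=agent, input="What tools do you have?")
        print(result.final_output)

        # If the remote tool list changes, drop the cached copy.
        server.invalidate_tools_cache()


asyncio.run(main())
```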

docs/ref/mcp/server.md (new file):
@@ -0,0 +1,3 @@
+ # `MCP Servers`
+
+ ::: agents.mcp.server

docs/ref/mcp/util.md (new file):
@@ -0,0 +1,3 @@
+ # `MCP Util`
+
+ ::: agents.mcp.util

docs/tracing.md:
@@ -35,6 +35,9 @@ By default, the SDK traces the following:
  - Function tool calls are each wrapped in `function_span()`
  - Guardrails are wrapped in `guardrail_span()`
  - Handoffs are wrapped in `handoff_span()`
+ - Audio inputs (speech-to-text) are wrapped in a `transcription_span()`
+ - Audio outputs (text-to-speech) are wrapped in a `speech_span()`
+ - Related audio spans may be parented under a `speech_group_span()`

  By default, the trace is named "Agent trace". You can set this name if you use `trace`, or you can can configure the name and other properties with the [`RunConfig`][agents.run.RunConfig].
@@ -76,7 +79,11 @@ Spans are automatically part of the current trace, and are nested under the near
  ## Sensitive data

- Some spans track potentially sensitive data. For example, the `generation_span()` stores the inputs/outputs of the LLM generation, and `function_span()` stores the inputs/outputs of function calls. These may contain sensitive data, so you can disable capturing that data via [`RunConfig.trace_include_sensitive_data`][agents.run.RunConfig.trace_include_sensitive_data].
+ Certain spans may capture potentially sensitive data.
+
+ The `generation_span()` stores the inputs/outputs of the LLM generation, and `function_span()` stores the inputs/outputs of function calls. These may contain sensitive data, so you can disable capturing that data via [`RunConfig.trace_include_sensitive_data`][agents.run.RunConfig.trace_include_sensitive_data].
+
+ Similarly, Audio spans include base64-encoded PCM data for input and output audio by default. You can disable capturing this audio data by configuring [`VoicePipelineConfig.trace_include_sensitive_audio_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_audio_data].

  ## Custom tracing processors
@@ -92,6 +99,7 @@ To customize this default setup, to send traces to alternative or additional bac
  ## External tracing processors list

+ - [Weights & Biases](https://weave-docs.wandb.ai/guides/integrations/openai_agents)
  - [Arize-Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)
  - [MLflow](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)
  - [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
@@ -102,3 +110,4 @@ To customize this default setup, to send traces to alternative or additional bac
  - [LangSmith](https://docs.smith.langchain.com/observability/how_to_guides/trace_with_openai_agents_sdk)
  - [Maxim AI](https://www.getmaxim.ai/docs/observe/integrations/openai-agents-sdk)
  - [Comet Opik](https://www.comet.com/docs/opik/tracing/integrations/openai_agents)
+ - [Langfuse](https://langfuse.com/docs/integrations/openaiagentssdk/openai-agents)
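
For reference, a short sketch of the two sensitive-data opt-outs described in the tracing hunks above, assuming `RunConfig.trace_include_sensitive_data` and `VoicePipelineConfig.trace_include_sensitive_audio_data` keep the names shown in the docs:

```python
from agents import Agent, RunConfig, Runner
from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline, VoicePipelineConfig

agent = Agent(name="Assistant", instructions="Be brief.")

# Keep LLM generation and function-call payloads out of trace spans.
result = Runner.run_sync(
    agent,
    "Hello",
    run_config=RunConfig(trace_include_sensitive_data=False),
)

# Keep base64-encoded PCM audio out of voice spans.
pipeline = VoicePipeline(
    workflow=SingleAgentVoiceWorkflow(agent),
    config=VoicePipelineConfig(trace_include_sensitive_audio_data=False),
)
```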

docs/visualization.md (new file):
@@ -0,0 +1,86 @@
+ # Agent Visualization
+
+ Agent visualization allows you to generate a structured graphical representation of agents and their relationships using **Graphviz**. This is useful for understanding how agents, tools, and handoffs interact within an application.
+
+ ## Installation
+
+ Install the optional `viz` dependency group:
+
+ ```bash
+ pip install "openai-agents[viz]"
+ ```
+
+ ## Generating a Graph
+
+ You can generate an agent visualization using the `draw_graph` function. This function creates a directed graph where:
+
+ - **Agents** are represented as yellow boxes.
+ - **Tools** are represented as green ellipses.
+ - **Handoffs** are directed edges from one agent to another.
+
+ ### Example Usage
+
+ ```python
+ from agents import Agent, function_tool
+ from agents.extensions.visualization import draw_graph
+
+ @function_tool
+ def get_weather(city: str) -> str:
+     return f"The weather in {city} is sunny."
+
+ spanish_agent = Agent(
+     name="Spanish agent",
+     instructions="You only speak Spanish.",
+ )
+
+ english_agent = Agent(
+     name="English agent",
+     instructions="You only speak English",
+ )
+
+ triage_agent = Agent(
+     name="Triage agent",
+     instructions="Handoff to the appropriate agent based on the language of the request.",
+     handoffs=[spanish_agent, english_agent],
+     tools=[get_weather],
+ )
+
+ draw_graph(triage_agent)
+ ```
+
+ ![Agent Graph](./assets/images/graph.png)
+
+ This generates a graph that visually represents the structure of the **triage agent** and its connections to sub-agents and tools.
+
+ ## Understanding the Visualization
+
+ The generated graph includes:
+
+ - A **start node** (`__start__`) indicating the entry point.
+ - Agents represented as **rectangles** with yellow fill.
+ - Tools represented as **ellipses** with green fill.
+ - Directed edges indicating interactions:
+   - **Solid arrows** for agent-to-agent handoffs.
+   - **Dotted arrows** for tool invocations.
+ - An **end node** (`__end__`) indicating where execution terminates.
+
+ ## Customizing the Graph
+
+ ### Showing the Graph
+ By default, `draw_graph` displays the graph inline. To show the graph in a separate window, write the following:
+
+ ```python
+ draw_graph(triage_agent).view()
+ ```
+
+ ### Saving the Graph
+ By default, `draw_graph` displays the graph inline. To save it as a file, specify a filename:
+
+ ```python
+ draw_graph(triage_agent, filename="agent_graph.png")
+ ```
+
+ This will generate `agent_graph.png` in the working directory.

docs/voice/quickstart.md:
@@ -5,7 +5,7 @@
  Make sure you've followed the base [quickstart instructions](../quickstart.md) for the Agents SDK, and set up a virtual environment. Then, install the optional voice dependencies from the SDK:

  ```bash
- pip install openai-agents[voice]
+ pip install 'openai-agents[voice]'
  ```

  ## Concepts
@@ -91,7 +91,7 @@ agent = Agent(
  We'll set up a simple voice pipeline, using [`SingleAgentVoiceWorkflow`][agents.voice.workflow.SingleAgentVoiceWorkflow] as the workflow.

  ```python
- from agents import SingleAgentVoiceWorkflow, VoicePipeline,
+ from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline
  pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))
  ```
@@ -100,10 +100,13 @@ pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))
  ```python
  import numpy as np
  import sounddevice as sd
+ from agents.voice import AudioInput

  # For simplicity, we'll just create 3 seconds of silence
  # In reality, you'd get microphone data
- audio = np.zeros(24000 * 3, dtype=np.int16)
+ buffer = np.zeros(24000 * 3, dtype=np.int16)
+ audio_input = AudioInput(buffer=buffer)
+
  result = await pipeline.run(audio_input)

  # Create an audio player using `sounddevice`
@@ -128,11 +131,13 @@ import sounddevice as sd

  from agents import (
      Agent,
+     function_tool,
+     set_tracing_disabled,
+ )
+ from agents.voice import (
      AudioInput,
      SingleAgentVoiceWorkflow,
      VoicePipeline,
-     function_tool,
-     set_tracing_disabled,
  )
  from agents.extensions.handoff_prompt import prompt_with_handoff_instructions

examples/basic/lifecycle_example.py:
@@ -79,7 +79,7 @@ multiply_agent = Agent(

  start_agent = Agent(
      name="Start Agent",
-     instructions="Generate a random number. If it's even, stop. If it's odd, hand off to the multipler agent.",
+     instructions="Generate a random number. If it's even, stop. If it's odd, hand off to the multiplier agent.",
      tools=[random_number],
      output_type=FinalResult,
      handoffs=[multiply_agent],

examples/mcp/filesystem_example/README.md (new file):
@@ -0,0 +1,26 @@
+ # MCP Filesystem Example
+
+ This example uses the [filesystem MCP server](https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem), running locally via `npx`.
+
+ Run it via:
+
+ ```
+ uv run python python examples/mcp/filesystem_example/main.py
+ ```
+
+ ## Details
+
+ The example uses the `MCPServerStdio` class from `agents`, with the command:
+
+ ```bash
+ npx -y "@modelcontextprotocol/server-filesystem" <samples_directory>
+ ```
+
+ It's only given access to the `sample_files` directory adjacent to the example, which contains some sample data.
+
+ Under the hood:
+
+ 1. The server is spun up in a subprocess, and exposes a bunch of tools like `list_directory()`, `read_file()`, etc.
+ 2. We add the server instance to the Agent via `mcp_agents`.
+ 3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`.
+ 4. If the LLM chooses to use an MCP tool, we call the MCP server to run the tool via `server.run_tool()`.

examples/mcp/filesystem_example/main.py (new file):
@@ -0,0 +1,57 @@
+ import asyncio
+ import os
+ import shutil
+
+ from agents import Agent, Runner, gen_trace_id, trace
+ from agents.mcp import MCPServer, MCPServerStdio
+
+
+ async def run(mcp_server: MCPServer):
+     agent = Agent(
+         name="Assistant",
+         instructions="Use the tools to read the filesystem and answer questions based on those files.",
+         mcp_servers=[mcp_server],
+     )
+
+     # List the files it can read
+     message = "Read the files and list them."
+     print(f"Running: {message}")
+     result = await Runner.run(starting_agent=agent, input=message)
+     print(result.final_output)
+
+     # Ask about books
+     message = "What is my #1 favorite book?"
+     print(f"\n\nRunning: {message}")
+     result = await Runner.run(starting_agent=agent, input=message)
+     print(result.final_output)
+
+     # Ask a question that reads then reasons.
+     message = "Look at my favorite songs. Suggest one new song that I might like."
+     print(f"\n\nRunning: {message}")
+     result = await Runner.run(starting_agent=agent, input=message)
+     print(result.final_output)
+
+
+ async def main():
+     current_dir = os.path.dirname(os.path.abspath(__file__))
+     samples_dir = os.path.join(current_dir, "sample_files")
+
+     async with MCPServerStdio(
+         name="Filesystem Server, via npx",
+         params={
+             "command": "npx",
+             "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
+         },
+     ) as server:
+         trace_id = gen_trace_id()
+         with trace(workflow_name="MCP Filesystem Example", trace_id=trace_id):
+             print(f"View trace: https://platform.openai.com/traces/{trace_id}\n")
+             await run(server)
+
+
+ if __name__ == "__main__":
+     # Let's make sure the user has npx installed
+     if not shutil.which("npx"):
+         raise RuntimeError("npx is not installed. Please install it with `npm install -g npx`.")
+
+     asyncio.run(main())

examples/mcp/filesystem_example/sample_files/favorite_books.txt (new file):
@@ -0,0 +1,20 @@
+ 1. To Kill a Mockingbird – Harper Lee
+ 2. Pride and Prejudice – Jane Austen
+ 3. 1984 – George Orwell
+ 4. The Hobbit – J.R.R. Tolkien
+ 5. Harry Potter and the Sorcerer’s Stone – J.K. Rowling
+ 6. The Great Gatsby – F. Scott Fitzgerald
+ 7. Charlotte’s Web – E.B. White
+ 8. Anne of Green Gables – Lucy Maud Montgomery
+ 9. The Alchemist – Paulo Coelho
+ 10. Little Women – Louisa May Alcott
+ 11. The Catcher in the Rye – J.D. Salinger
+ 12. Animal Farm – George Orwell
+ 13. The Chronicles of Narnia: The Lion, the Witch, and the Wardrobe – C.S. Lewis
+ 14. The Book Thief – Markus Zusak
+ 15. A Wrinkle in Time – Madeleine L’Engle
+ 16. The Secret Garden – Frances Hodgson Burnett
+ 17. Moby-Dick – Herman Melville
+ 18. Fahrenheit 451 – Ray Bradbury
+ 19. Jane Eyre – Charlotte Brontë
+ 20. The Little Prince – Antoine de Saint-Exupéry

examples/mcp/filesystem_example/sample_files/favorite_cities.txt (new file):
@@ -0,0 +1,4 @@
+ - In the summer, I love visiting London.
+ - In the winter, Tokyo is great.
+ - In the spring, San Francisco.
+ - In the fall, New York is the best.

examples/mcp/filesystem_example/sample_files/favorite_songs.txt (new file):
@@ -0,0 +1,10 @@
+ 1. "Here Comes the Sun" – The Beatles
+ 2. "Imagine" – John Lennon
+ 3. "Bohemian Rhapsody" – Queen
+ 4. "Shake It Off" – Taylor Swift
+ 5. "Billie Jean" – Michael Jackson
+ 6. "Uptown Funk" – Mark Ronson ft. Bruno Mars
+ 7. "Don’t Stop Believin’" – Journey
+ 8. "Dancing Queen" – ABBA
+ 9. "Happy" – Pharrell Williams
+ 10. "Wonderwall" – Oasis

examples/mcp/git_example/README.md (new file):
@@ -0,0 +1,25 @@
+ # MCP Git Example
+
+ This example uses the [git MCP server](https://github.com/modelcontextprotocol/servers/tree/main/src/git), running locally via `uvx`.
+
+ Run it via:
+
+ ```
+ uv run python examples/mcp/git_example/main.py
+ ```
+
+ ## Details
+
+ The example uses the `MCPServerStdio` class from `agents`, with the command:
+
+ ```bash
+ uvx mcp-server-git
+ ```
+ Prior to running the agent, the user is prompted to provide a local directory path to their git repo. Using that, the Agent can invoke Git MCP tools like `git_log` to inspect the git commit log.
+
+ Under the hood:
+
+ 1. The server is spun up in a subprocess, and exposes a bunch of tools like `git_log()`
+ 2. We add the server instance to the Agent via `mcp_agents`.
+ 3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. The result is cached.
+ 4. If the LLM chooses to use an MCP tool, we call the MCP server to run the tool via `server.run_tool()`.

examples/mcp/git_example/main.py (new file):
@@ -0,0 +1,48 @@
+ import asyncio
+ import shutil
+
+ from agents import Agent, Runner, trace
+ from agents.mcp import MCPServer, MCPServerStdio
+
+
+ async def run(mcp_server: MCPServer, directory_path: str):
+     agent = Agent(
+         name="Assistant",
+         instructions=f"Answer questions about the git repository at {directory_path}, use that for repo_path",
+         mcp_servers=[mcp_server],
+     )
+
+     message = "Who's the most frequent contributor?"
+     print("\n" + "-" * 40)
+     print(f"Running: {message}")
+     result = await Runner.run(starting_agent=agent, input=message)
+     print(result.final_output)
+
+     message = "Summarize the last change in the repository."
+     print("\n" + "-" * 40)
+     print(f"Running: {message}")
+     result = await Runner.run(starting_agent=agent, input=message)
+     print(result.final_output)
+
+
+ async def main():
+     # Ask the user for the directory path
+     directory_path = input("Please enter the path to the git repository: ")
+
+     async with MCPServerStdio(
+         params={
+             "command": "uvx",
+             "args": [
+                 "mcp-server-git"
+             ]
+         }
+     ) as server:
+         with trace(workflow_name="MCP Git Example"):
+             await run(server, directory_path)
+
+
+ if __name__ == "__main__":
+     if not shutil.which("uvx"):
+         raise RuntimeError("uvx is not installed. Please install it with `pip install uvx`.")
+
+     asyncio.run(main())

examples/research_bot/agents/search_agent.py:
@@ -4,7 +4,7 @@ from agents.model_settings import ModelSettings
  INSTRUCTIONS = (
      "You are a research assistant. Given a search term, you search the web for that term and"
      "produce a concise summary of the results. The summary must 2-3 paragraphs and less than 300"
-     "words. Capture the main points. Write succintly, no need to have complete sentences or good"
+     "words. Capture the main points. Write succinctly, no need to have complete sentences or good"
      "grammar. This will be consumed by someone synthesizing a report, so its vital you capture the"
      "essence and ignore any fluff. Do not include any additional commentary other than the summary"
      "itself."

examples/voice/static/main.py:
@@ -1,6 +1,8 @@
  import asyncio
  import random

+ import numpy as np
+
  from agents import Agent, function_tool
  from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
  from agents.voice import (
@@ -78,6 +80,9 @@ async def main():
  elif event.type == "voice_stream_event_lifecycle":
  print(f"Received lifecycle event: {event.event}")

+ # Add 1 second of silence to the end of the stream to avoid cutting off the last audio.
+ player.add_audio(np.zeros(24000 * 1, dtype=np.int16))
+

  if __name__ == "__main__":
      asyncio.run(main())

examples/voice/static/util.py:
@@ -62,6 +62,7 @@ class AudioPlayer:
          return self

      def __exit__(self, exc_type, exc_value, traceback):
+         self.stream.stop()  # wait for the stream to finish
          self.stream.close()

      def add_audio(self, audio_data: npt.NDArray[np.int16]):

examples/voice/streamed/main.py:
@@ -1,6 +1,7 @@
  from __future__ import annotations

  import asyncio
+ from typing import TYPE_CHECKING

  import numpy as np
  import sounddevice as sd
@@ -13,7 +14,18 @@ from typing_extensions import override

  from agents.voice import StreamedAudioInput, VoicePipeline

- from .agents import MyWorkflow
+ # Import MyWorkflow class - handle both module and package use cases
+ if TYPE_CHECKING:
+     # For type checking, use the relative import
+     from .my_workflow import MyWorkflow
+ else:
+     # At runtime, try both import styles
+     try:
+         # Try relative import first (when used as a package)
+         from .my_workflow import MyWorkflow
+     except ImportError:
+         # Fall back to direct import (when run as a script)
+         from my_workflow import MyWorkflow

  CHUNK_LENGTH_S = 0.05  # 100ms
  SAMPLE_RATE = 24000