openai-agents 0.0.18__tar.gz → 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of openai-agents might be problematic.
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/workflows/issues.yml +1 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/AGENTS.md +3 -1
- openai_agents-0.1.0/CLAUDE.md +1 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/PKG-INFO +14 -6
- {openai_agents-0.0.18 → openai_agents-0.1.0}/README.md +11 -4
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/context.md +2 -1
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/guardrails.md +1 -1
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/guardrails.md +22 -22
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/mcp.md +8 -1
- openai_agents-0.1.0/docs/ja/models/index.md +155 -0
- openai_agents-0.1.0/docs/mcp.md +155 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/models/index.md +16 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/release.md +11 -5
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/scripts/translate_docs.py +41 -18
- openai_agents-0.1.0/examples/basic/hello_world_jupyter.ipynb +45 -0
- openai_agents-0.1.0/examples/reasoning_content/__init__.py +3 -0
- openai_agents-0.1.0/examples/reasoning_content/main.py +124 -0
- openai_agents-0.1.0/examples/reasoning_content/runner_example.py +88 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/pyproject.toml +3 -3
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/__init__.py +2 -1
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/_run_impl.py +30 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/agent.py +7 -3
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/models/litellm_model.py +7 -3
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/handoffs.py +14 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/mcp/__init__.py +13 -1
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/mcp/server.py +140 -15
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/mcp/util.py +89 -5
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/model_settings.py +52 -6
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/chatcmpl_converter.py +12 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/chatcmpl_stream_handler.py +127 -15
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/openai_chatcompletions.py +12 -10
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/openai_responses.py +14 -4
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/repl.py +1 -4
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/run.py +230 -100
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tool.py +25 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/__init__.py +10 -5
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/create.py +16 -16
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/processor_interface.py +1 -1
- openai_agents-0.0.18/src/agents/tracing/setup.py → openai_agents-0.1.0/src/agents/tracing/provider.py +88 -8
- openai_agents-0.1.0/src/agents/tracing/setup.py +21 -0
- openai_agents-0.1.0/src/agents/tracing/util.py +21 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/conftest.py +8 -2
- openai_agents-0.1.0/tests/mcp/helpers.py +99 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/test_caching.py +14 -8
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/test_mcp_util.py +14 -6
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/test_server_errors.py +6 -1
- openai_agents-0.1.0/tests/mcp/test_tool_filtering.py +243 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/model_settings/test_serialization.py +32 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/models/test_map.py +5 -4
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_agent_config.py +6 -5
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_computer_action.py +44 -1
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_handoff_tool.py +94 -8
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_output_tool.py +8 -8
- openai_agents-0.1.0/tests/test_reasoning_content.py +289 -0
- openai_agents-0.1.0/tests/test_run.py +26 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_run_config.py +1 -1
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_run_step_execution.py +3 -3
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_run_step_processing.py +6 -6
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/test_workflow.py +3 -2
- {openai_agents-0.0.18 → openai_agents-0.1.0}/uv.lock +1 -1
- openai_agents-0.0.18/docs/ja/models/index.md +0 -116
- openai_agents-0.0.18/docs/ja/models.md +0 -106
- openai_agents-0.0.18/docs/mcp.md +0 -61
- openai_agents-0.0.18/examples/basic/hello_world_jupyter.py +0 -11
- openai_agents-0.0.18/src/agents/tracing/util.py +0 -22
- openai_agents-0.0.18/tests/mcp/helpers.py +0 -58
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/ISSUE_TEMPLATE/question.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.github/workflows/tests.yml +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.gitignore +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.prettierrc +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/.vscode/settings.json +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/LICENSE +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/Makefile +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/agents.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/assets/images/graph.png +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/assets/images/mcp-tracing.jpg +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/config.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/examples.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/handoffs.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/index.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/agents.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/config.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/context.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/examples.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/handoffs.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/index.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/models/litellm.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/multi_agent.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/quickstart.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/repl.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/results.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/running_agents.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/streaming.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/tools.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/tracing.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/visualization.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/voice/pipeline.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/voice/quickstart.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ja/voice/tracing.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/models/litellm.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/multi_agent.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/quickstart.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/extensions/litellm.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/index.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/items.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/mcp/server.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/mcp/util.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/repl.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/result.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/run.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/events.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/exceptions.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/input.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/model.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/models/openai_provider.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/models/openai_stt.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/models/openai_tts.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/pipeline.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/pipeline_config.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/result.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/utils.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/ref/voice/workflow.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/repl.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/results.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/running_agents.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/streaming.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/tools.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/tracing.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/visualization.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/voice/pipeline.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/voice/quickstart.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/docs/voice/tracing.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/forcing_tool_use.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/input_guardrails.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/llm_as_a_judge.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/agent_patterns/streaming_guardrails.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/agent_lifecycle_example.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/local_image.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/media/image_bison.jpg +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/non_strict_output_type.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/previous_response_id.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/prompt_template.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/remote_image.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/basic/tools.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/financials_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/planner_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/risk_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/search_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/agents/writer_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/manager.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/financial_research_agent/printer.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/handoffs/message_filter.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/handoffs/message_filter_streaming.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/hosted_mcp/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/hosted_mcp/approvals.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/hosted_mcp/simple.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/filesystem_example/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/filesystem_example/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/filesystem_example/sample_files/favorite_books.txt +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/git_example/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/git_example/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/sse_example/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/sse_example/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/sse_example/server.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/streamablehttp_example/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/streamablehttp_example/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/mcp/streamablehttp_example/server.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/model_providers/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/model_providers/custom_example_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/model_providers/custom_example_global.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/model_providers/custom_example_provider.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/model_providers/litellm_auto.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/model_providers/litellm_provider.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/agents/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/tools/code_interpreter.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/tools/image_generator.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/static/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/static/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/static/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/static/util.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/streamed/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/streamed/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/streamed/main.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/examples/voice/streamed/my_workflow.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/mkdocs.yml +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/_config.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/agent_output.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/computer.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/exceptions.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/models/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/models/litellm_provider.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/extensions/visualization.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/function_schema.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/guardrail.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/items.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/logger.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/chatcmpl_helpers.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/interface.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/multi_provider.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/models/openai_provider.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/prompts.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/py.typed +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/result.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/stream_events.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tool_context.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/processors.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/scope.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/span_data.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/spans.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/tracing/traces.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/usage.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/_coro.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/_error_tracing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/_json.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/_pretty_print.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/_transforms.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/util/_types.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/version.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/events.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/exceptions.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/imports.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/input.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/model.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/models/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/models/openai_model_provider.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/models/openai_stt.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/models/openai_tts.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/pipeline.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/pipeline_config.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/result.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/utils.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/src/agents/voice/workflow.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/README.md +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/fake_model.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/fastapi/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/fastapi/streaming_app.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/fastapi/test_streaming_context.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/conftest.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/test_connect_disconnect.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/test_mcp_tracing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/mcp/test_runner_calls_mcp.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/models/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/models/conftest.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/models/test_kwargs_functionality.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/models/test_litellm_chatcompletions_stream.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/models/test_litellm_extra_body.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_agent_prompt.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_agent_runner.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_agent_runner_streamed.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_agent_tracing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_cancel_streaming.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_config.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_extra_headers.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_function_schema.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_function_tool.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_function_tool_decorator.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_openai_chatcompletions.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_openai_chatcompletions_converter.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_openai_chatcompletions_stream.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_pretty_print.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_repl.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_responses.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_responses_tracing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_result_cast.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_run_error_details.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_tool_choice_reset.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_tool_use_behavior.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_tracing.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_tracing_errors.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_tracing_errors_streamed.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_usage.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/test_visualization.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/testing_processor.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/tracing/test_processor_api_key.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/__init__.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/conftest.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/fake_models.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/helpers.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/test_input.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/test_openai_stt.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/test_openai_tts.py +0 -0
- {openai_agents-0.0.18 → openai_agents-0.1.0}/tests/voice/test_pipeline.py +0 -0
.github/workflows/issues.yml

````diff
@@ -21,6 +21,7 @@ jobs:
 days-before-pr-stale: 10
 days-before-pr-close: 7
 stale-pr-label: "stale"
+exempt-issue-labels: "skip-stale"
 stale-pr-message: "This PR is stale because it has been open for 10 days with no activity."
 close-pr-message: "This PR was closed because it has been inactive for 7 days since being marked as stale."
 repo-token: ${{ secrets.GITHUB_TOKEN }}
````
AGENTS.md

````diff
@@ -35,6 +35,8 @@ Welcome to the OpenAI Agents SDK repository. This file contains the main points
 
 Coverage can be generated with `make coverage`.
 
+All python commands should be run via `uv run python ...`
+
 ## Snapshot tests
 
 Some tests rely on inline snapshots. See `tests/README.md` for details on updating them:
@@ -64,6 +66,6 @@ Commit messages should be concise and written in the imperative mood. Small, foc
 ## What reviewers look for
 
 - Tests covering new behaviour.
-- Consistent style: code formatted with `ruff format`, imports sorted, and type hints passing `mypy
+- Consistent style: code formatted with `uv run ruff format`, imports sorted, and type hints passing `uv run mypy .`.
 - Clear documentation for any public API changes.
 - Clean history and a helpful PR description.
````
CLAUDE.md (new file)

````diff
@@ -0,0 +1 @@
+Read the AGENTS.md file for instructions.
````
PKG-INFO

````diff
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.18
+Version: 0.1.0
 Summary: OpenAI Agents SDK
-Project-URL: Homepage, https://github.
+Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
 Author-email: OpenAI <support@openai.com>
 License-Expression: MIT
@@ -15,6 +15,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
@@ -56,14 +57,21 @@ Explore the [examples](examples) directory to see the SDK in action, and read ou
 
 1. Set up your Python environment
 
-
+- Option A: Using venv (traditional method)
+```bash
 python -m venv env
-source env/bin/activate
+source env/bin/activate # On Windows: env\Scripts\activate
+```
+
+- Option B: Using uv (recommended)
+```bash
+uv venv
+source .venv/bin/activate # On Windows: .venv\Scripts\activate
 ```
 
 2. Install Agents SDK
 
-```
+```bash
 pip install openai-agents
 ```
 
@@ -86,7 +94,7 @@ print(result.final_output)
 
 (_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
 
-(_For Jupyter notebook users, see [hello_world_jupyter.
+(_For Jupyter notebook users, see [hello_world_jupyter.ipynb](examples/basic/hello_world_jupyter.ipynb)_)
 
 ## Handoffs example
 
````
README.md

````diff
@@ -20,14 +20,21 @@ Explore the [examples](examples) directory to see the SDK in action, and read ou
 
 1. Set up your Python environment
 
-
+- Option A: Using venv (traditional method)
+```bash
 python -m venv env
-source env/bin/activate
+source env/bin/activate # On Windows: env\Scripts\activate
+```
+
+- Option B: Using uv (recommended)
+```bash
+uv venv
+source .venv/bin/activate # On Windows: .venv\Scripts\activate
 ```
 
 2. Install Agents SDK
 
-```
+```bash
 pip install openai-agents
 ```
 
@@ -50,7 +57,7 @@ print(result.final_output)
 
 (_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
 
-(_For Jupyter notebook users, see [hello_world_jupyter.
+(_For Jupyter notebook users, see [hello_world_jupyter.ipynb](examples/basic/hello_world_jupyter.ipynb)_)
 
 ## Handoffs example
 
````
docs/context.md

````diff
@@ -38,7 +38,8 @@ class UserInfo: # (1)!
 
 @function_tool
 async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str: # (2)!
-
+    """Fetch the age of the user. Call this function to get user's age information."""
+    return f"The user {wrapper.context.name} is 47 years old"
 
 async def main():
     user_info = UserInfo(name="John", uid=123)
````
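The docstring added in this hunk is what `@function_tool` exposes as the tool's description to the model. A minimal sketch of how this tool is typically wired into an agent, mirroring the surrounding docs example (the `UserInfo` dataclass and the prompt string are illustrative assumptions, not the exact file contents):

```python
import asyncio
from dataclasses import dataclass

from agents import Agent, RunContextWrapper, Runner, function_tool


@dataclass
class UserInfo:
    name: str
    uid: int


@function_tool
async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str:
    """Fetch the age of the user. Call this function to get user's age information."""
    return f"The user {wrapper.context.name} is 47 years old"


async def main() -> None:
    user_info = UserInfo(name="John", uid=123)
    # The docstring above becomes the tool's description in the schema the model sees,
    # which is why the diff adds it to the tool function.
    agent = Agent[UserInfo](
        name="Assistant",
        tools=[fetch_user_age],
    )
    result = await Runner.run(agent, "What is the age of the user?", context=user_info)
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())
```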
docs/guardrails.md

````diff
@@ -23,7 +23,7 @@ Input guardrails run in 3 steps:
 
 Output guardrails run in 3 steps:
 
-1. First, the guardrail receives the
+1. First, the guardrail receives the output produced by the agent.
 2. Next, the guardrail function runs to produce a [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput], which is then wrapped in an [`OutputGuardrailResult`][agents.guardrail.OutputGuardrailResult]
 3. Finally, we check if [`.tripwire_triggered`][agents.guardrail.GuardrailFunctionOutput.tripwire_triggered] is true. If true, an [`OutputGuardrailTripwireTriggered`][agents.exceptions.OutputGuardrailTripwireTriggered] exception is raised, so you can appropriately respond to the user or handle the exception.
 
````
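A minimal output guardrail following these three steps might look like the sketch below; the `MessageOutput` type, the digit-based check, and the agent wiring are illustrative assumptions rather than content from the diff:

```python
import asyncio

from pydantic import BaseModel

from agents import (
    Agent,
    GuardrailFunctionOutput,
    OutputGuardrailTripwireTriggered,
    RunContextWrapper,
    Runner,
    output_guardrail,
)


class MessageOutput(BaseModel):
    response: str


@output_guardrail
async def no_digits_guardrail(
    ctx: RunContextWrapper, agent: Agent, output: MessageOutput
) -> GuardrailFunctionOutput:
    # Step 1: the guardrail receives the agent's final output.
    # Step 2: return a GuardrailFunctionOutput; the SDK wraps it in an OutputGuardrailResult.
    tripped = any(ch.isdigit() for ch in output.response)
    return GuardrailFunctionOutput(output_info={"contains_digits": tripped}, tripwire_triggered=tripped)


agent = Agent(
    name="Support agent",
    instructions="Help the customer with their question.",
    output_type=MessageOutput,
    output_guardrails=[no_digits_guardrail],
)


async def main() -> None:
    try:
        await Runner.run(agent, "What is the office phone number?")
    except OutputGuardrailTripwireTriggered:
        # Step 3: a true tripwire raises OutputGuardrailTripwireTriggered.
        print("Output guardrail tripped")


if __name__ == "__main__":
    asyncio.run(main())
```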
docs/ja/guardrails.md

````diff
@@ -4,44 +4,44 @@ search:
 ---
 # ガードレール
 
-ガードレールは エージェント と _
+ガードレールは エージェント と _並行して_ 実行され、ユーザー入力のチェックとバリデーションを行えます。例えば、とても賢い(つまり遅く/高価な)モデルを使用してカスタマーリクエストを処理するエージェントがあるとします。悪意のある ユーザー がモデルに数学の宿題を手伝わせようとするのは避けたいでしょう。そこで、速く/安価なモデルで動くガードレールを実行できます。ガードレールが悪意のある利用を検知すると、直ちにエラーを送出して高価なモデルの実行を停止し、時間とコストを節約できます。
 
-ガードレールには 2
+ガードレールには 2 種類あります:
 
-1.
-2.
+1. 入力ガードレール は初期 ユーザー 入力に対して実行されます
+2. 出力ガードレール は最終的なエージェント出力に対して実行されます
 
-##
+## 入力ガードレール
 
-
+入力ガードレールは 3 ステップで実行されます:
 
 1. まず、ガードレールはエージェントに渡されたものと同じ入力を受け取ります。
-2. 次に、ガードレール関数が実行され [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput] を生成し、それが [`InputGuardrailResult`][agents.guardrail.InputGuardrailResult]
-3. 最後に [
+2. 次に、ガードレール関数が実行され [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput] を生成し、それが [`InputGuardrailResult`][agents.guardrail.InputGuardrailResult] にラップされます。
+3. 最後に [.tripwire_triggered][agents.guardrail.GuardrailFunctionOutput.tripwire_triggered] が true かどうかを確認します。true の場合、[`InputGuardrailTripwireTriggered`][agents.exceptions.InputGuardrailTripwireTriggered] 例外が送出されるので、適切に ユーザー に応答したり例外を処理できます。
 
 !!! Note
 
-
+    入力ガードレールは ユーザー 入力に対して実行されることを意図しているため、ガードレールは *最初* のエージェントでのみ実行されます。「なぜ `guardrails` プロパティがエージェントにあり、`Runner.run` に渡さないのか」と疑問に思うかもしれません。これは、ガードレールが実際の エージェント と密接に関連していることが多いからです。異なるエージェントには異なるガードレールを実行するため、コードを同じ場所に置くことで可読性が向上します。
 
-##
+## 出力ガードレール
 
-
+出力ガードレールは 3 ステップで実行されます:
 
-1.
-2. 次に、ガードレール関数が実行され [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput] を生成し、それが [`OutputGuardrailResult`][agents.guardrail.OutputGuardrailResult]
-3. 最後に [
+1. まず、ガードレールはエージェントが生成した出力を受け取ります。
+2. 次に、ガードレール関数が実行され [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput] を生成し、それが [`OutputGuardrailResult`][agents.guardrail.OutputGuardrailResult] にラップされます。
+3. 最後に [.tripwire_triggered][agents.guardrail.GuardrailFunctionOutput.tripwire_triggered] が true かどうかを確認します。true の場合、[`OutputGuardrailTripwireTriggered`][agents.exceptions.OutputGuardrailTripwireTriggered] 例外が送出されるので、適切に ユーザー に応答したり例外を処理できます。
 
 !!! Note
 
-
+    出力ガードレールは最終的なエージェント出力に対して実行されることを意図しているため、ガードレールは *最後* のエージェントでのみ実行されます。入力ガードレールの場合と同様、ガードレールが実際の エージェント と密接に関連していることが多いため、コードを同じ場所に置くことで可読性が向上します。
 
-##
+## トリップワイヤー
 
-
+入力または出力がガードレールを通過できなかった場合、ガードレールはトリップワイヤーでそれを示すことができます。トリップワイヤーがトリガーされたガードレールを検知した時点で、直ちに `{Input,Output}GuardrailTripwireTriggered` 例外を送出し、エージェントの実行を停止します。
 
 ## ガードレールの実装
 
-入力を受け取り、[`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput]
+入力を受け取り、[`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput] を返す関数を提供する必要があります。この例では、内部で エージェント を実行してこれを行います。
 
 ```python
 from pydantic import BaseModel
@@ -94,12 +94,12 @@ async def main():
     print("Math homework guardrail tripped")
 ```
 
-1.
-2.
+1. このエージェントをガードレール関数内で使用します。
+2. これはエージェントの入力/コンテキストを受け取り、結果を返すガードレール関数です。
 3. ガードレール結果に追加情報を含めることができます。
 4. これはワークフローを定義する実際のエージェントです。
 
-
+出力ガードレールも同様です。
 
 ```python
 from pydantic import BaseModel
@@ -155,4 +155,4 @@ async def main():
 1. これは実際のエージェントの出力型です。
 2. これはガードレールの出力型です。
 3. これはエージェントの出力を受け取り、結果を返すガードレール関数です。
-4. これはワークフローを定義する実際のエージェントです。
+4. これはワークフローを定義する実際のエージェントです。
````
docs/ja/mcp.md

````diff
@@ -23,13 +23,20 @@ Agents SDK は MCP をサポートしており、これにより幅広い MCP
 たとえば、[公式 MCP filesystem サーバー](https://www.npmjs.com/package/@modelcontextprotocol/server-filesystem)を利用する場合は次のようになります。
 
 ```python
+from agents.run_context import RunContextWrapper
+
 async with MCPServerStdio(
     params={
         "command": "npx",
         "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
     }
 ) as server:
-
+    # 注意:実際には通常は MCP サーバーをエージェントに追加し、
+    # フレームワークがツール一覧の取得を自動的に処理するようにします。
+    # list_tools() への直接呼び出しには run_context と agent パラメータが必要です。
+    run_context = RunContextWrapper(context=None)
+    agent = Agent(name="test", instructions="test")
+    tools = await server.list_tools(run_context, agent)
 ```
 
 ## MCP サーバーの利用
````
docs/ja/models/index.md (new file)

````diff
@@ -0,0 +1,155 @@
+---
+search:
+  exclude: true
+---
+# モデル
+
+Agents SDK は OpenAI モデルを 2 つの形態で即利用できます。
+
+- **推奨**: [`OpenAIResponsesModel`][agents.models.openai_responses.OpenAIResponsesModel] は、新しい [Responses API](https://platform.openai.com/docs/api-reference/responses) を使用して OpenAI API を呼び出します。
+- [`OpenAIChatCompletionsModel`][agents.models.openai_chatcompletions.OpenAIChatCompletionsModel] は、[Chat Completions API](https://platform.openai.com/docs/api-reference/chat) を使用して OpenAI API を呼び出します。
+
+## 非 OpenAI モデル
+
+ほとんどの非 OpenAI モデルは [LiteLLM インテグレーション](./litellm.md) 経由で利用できます。まず、litellm 依存グループをインストールします:
+
+```bash
+pip install "openai-agents[litellm]"
+```
+
+次に、`litellm/` 接頭辞を付けて任意の [サポート対象モデル](https://docs.litellm.ai/docs/providers) を使用します:
+
+```python
+claude_agent = Agent(model="litellm/anthropic/claude-3-5-sonnet-20240620", ...)
+gemini_agent = Agent(model="litellm/gemini/gemini-2.5-flash-preview-04-17", ...)
+```
+
+### 非 OpenAI モデルを利用するその他の方法
+
+他の LLM プロバイダーを統合する方法は、あと 3 つあります([こちら](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/) に例があります)。
+
+1. [`set_default_openai_client`][agents.set_default_openai_client]
+   `AsyncOpenAI` インスタンスを LLM クライアントとしてグローバルに使用したい場合に便利です。LLM プロバイダーが OpenAI 互換の API エンドポイントを持ち、`base_url` と `api_key` を設定できる場合に使用します。設定例は [examples/model_providers/custom_example_global.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_global.py) にあります。
+2. [`ModelProvider`][agents.models.interface.ModelProvider]
+   `Runner.run` レベルでカスタムモデルプロバイダーを指定できます。これにより「この run のすべてのエージェントでカスタムプロバイダーを使う」と宣言できます。設定例は [examples/model_providers/custom_example_provider.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_provider.py) にあります。
+3. [`Agent.model`][agents.agent.Agent.model]
+   特定のエージェントインスタンスにモデルを指定できます。エージェントごとに異なるプロバイダーを組み合わせることが可能です。設定例は [examples/model_providers/custom_example_agent.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_agent.py) にあります。ほとんどのモデルを簡単に利用する方法として [LiteLLM インテグレーション](./litellm.md) を利用できます。
+
+`platform.openai.com` の API キーを持っていない場合は、`set_tracing_disabled()` でトレーシングを無効化するか、[別のトレーシングプロセッサー](../tracing.md) を設定することをお勧めします。
+
+!!! note
+    これらの例では、Responses API をまだサポートしていない LLM プロバイダーが多いため、Chat Completions API/モデルを使用しています。LLM プロバイダーが Responses API をサポートしている場合は、Responses を使用することを推奨します。
+
+## モデルの組み合わせ
+
+1 つのワークフロー内でエージェントごとに異なるモデルを使用したい場合があります。たとえば、振り分けには小さく高速なモデルを、複雑なタスクには大きく高性能なモデルを使用するといったケースです。[`Agent`][agents.Agent] を設定する際、次のいずれかの方法でモデルを選択できます。
+
+1. モデル名を直接指定する
+2. 任意のモデル名と、その名前を Model インスタンスへマッピングできる [`ModelProvider`][agents.models.interface.ModelProvider] を指定する
+3. [`Model`][agents.models.interface.Model] 実装を直接渡す
+
+!!!note
+    SDK は [`OpenAIResponsesModel`][agents.models.openai_responses.OpenAIResponsesModel] と [`OpenAIChatCompletionsModel`][agents.models.openai_chatcompletions.OpenAIChatCompletionsModel] の両形態をサポートしていますが、各ワークフローで 1 つのモデル形態に統一することを推奨します。2 つの形態はサポートする機能とツールが異なるためです。混在させる場合は、使用する機能が双方で利用可能かを必ず確認してください。
+
+```python
+from agents import Agent, Runner, AsyncOpenAI, OpenAIChatCompletionsModel
+import asyncio
+
+spanish_agent = Agent(
+    name="Spanish agent",
+    instructions="You only speak Spanish.",
+    model="o3-mini", # (1)!
+)
+
+english_agent = Agent(
+    name="English agent",
+    instructions="You only speak English",
+    model=OpenAIChatCompletionsModel( # (2)!
+        model="gpt-4o",
+        openai_client=AsyncOpenAI()
+    ),
+)
+
+triage_agent = Agent(
+    name="Triage agent",
+    instructions="Handoff to the appropriate agent based on the language of the request.",
+    handoffs=[spanish_agent, english_agent],
+    model="gpt-3.5-turbo",
+)
+
+async def main():
+    result = await Runner.run(triage_agent, input="Hola, ¿cómo estás?")
+    print(result.final_output)
+```
+
+1. OpenAI のモデル名を直接設定
+2. [`Model`][agents.models.interface.Model] 実装を提供
+
+エージェントで使用するモデルをさらに構成したい場合は、`temperature` などのオプションパラメーターを指定できる [`ModelSettings`][agents.models.interface.ModelSettings] を渡せます。
+
+```python
+from agents import Agent, ModelSettings
+
+english_agent = Agent(
+    name="English agent",
+    instructions="You only speak English",
+    model="gpt-4o",
+    model_settings=ModelSettings(temperature=0.1),
+)
+```
+
+OpenAI の Responses API を使用する場合、`user` や `service_tier` など[その他のオプションパラメーター](https://platform.openai.com/docs/api-reference/responses/create) があります。トップレベルで指定できない場合は、`extra_args` で渡してください。
+
+```python
+from agents import Agent, ModelSettings
+
+english_agent = Agent(
+    name="English agent",
+    instructions="You only speak English",
+    model="gpt-4o",
+    model_settings=ModelSettings(
+        temperature=0.1,
+        extra_args={"service_tier": "flex", "user": "user_12345"},
+    ),
+)
+```
+
+## 他の LLM プロバイダー使用時の一般的な問題
+
+### Tracing クライアントの 401 エラー
+
+Tracing 関連のエラーが発生する場合、トレースは OpenAI サーバーへアップロードされるため、OpenAI API キーが必要です。対応方法は次の 3 つです。
+
+1. トレーシングを完全に無効化する: [`set_tracing_disabled(True)`][agents.set_tracing_disabled]
+2. トレース用に OpenAI キーを設定する: [`set_tracing_export_api_key(...)`][agents.set_tracing_export_api_key]
+   この API キーはトレースのアップロードのみに使用され、[platform.openai.com](https://platform.openai.com/) で取得したものが必要です。
+3. OpenAI 以外のトレースプロセッサーを使用する。詳細は [tracing のドキュメント](../tracing.md#custom-tracing-processors) を参照してください。
+
+### Responses API のサポート
+
+SDK はデフォルトで Responses API を使用しますが、ほとんどの LLM プロバイダーはまだ非対応です。その結果、404 などのエラーが発生することがあります。対処方法は次の 2 つです。
+
+1. [`set_default_openai_api("chat_completions")`][agents.set_default_openai_api] を呼び出す
+   `OPENAI_API_KEY` と `OPENAI_BASE_URL` を環境変数で設定している場合に有効です。
+2. [`OpenAIChatCompletionsModel`][agents.models.openai_chatcompletions.OpenAIChatCompletionsModel] を使用する
+   例は [こちら](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/) にあります。
+
+### structured outputs のサポート
+
+一部のモデルプロバイダーは [structured outputs](https://platform.openai.com/docs/guides/structured-outputs) をサポートしていません。その場合、次のようなエラーが発生することがあります。
+
+```
+
+BadRequestError: Error code: 400 - {'error': {'message': "'response_format.type' : value is not one of the allowed values ['text','json_object']", 'type': 'invalid_request_error'}}
+
+```
+
+これは一部プロバイダーの制限で、JSON 出力自体はサポートしていても `json_schema` を指定できないことが原因です。修正に向けて取り組んでいますが、JSON スキーマ出力をサポートしているプロバイダーを使用することをお勧めします。そうでないと、不正な JSON が返されてアプリが頻繁に壊れる可能性があります。
+
+## プロバイダーを跨いだモデルの組み合わせ
+
+モデルプロバイダーごとの機能差に注意しないと、エラーが発生します。たとえば OpenAI は structured outputs、マルチモーダル入力、ホスト型の file search や web search をサポートしていますが、多くの他プロバイダーは非対応です。以下の制限に留意してください。
+
+- 対応していないプロバイダーには未サポートの `tools` を送らない
+- テキストのみのモデルを呼び出す前にマルチモーダル入力を除外する
+- structured JSON 出力をサポートしていないプロバイダーでは、不正な JSON が返ることがある点に注意する
````
docs/mcp.md (new file)

````diff
@@ -0,0 +1,155 @@
+# Model context protocol (MCP)
+
+The [Model context protocol](https://modelcontextprotocol.io/introduction) (aka MCP) is a way to provide tools and context to the LLM. From the MCP docs:
+
+> MCP is an open protocol that standardizes how applications provide context to LLMs. Think of MCP like a USB-C port for AI applications. Just as USB-C provides a standardized way to connect your devices to various peripherals and accessories, MCP provides a standardized way to connect AI models to different data sources and tools.
+
+The Agents SDK has support for MCP. This enables you to use a wide range of MCP servers to provide tools to your Agents.
+
+## MCP servers
+
+Currently, the MCP spec defines three kinds of servers, based on the transport mechanism they use:
+
+1. **stdio** servers run as a subprocess of your application. You can think of them as running "locally".
+2. **HTTP over SSE** servers run remotely. You connect to them via a URL.
+3. **Streamable HTTP** servers run remotely using the Streamable HTTP transport defined in the MCP spec.
+
+You can use the [`MCPServerStdio`][agents.mcp.server.MCPServerStdio], [`MCPServerSse`][agents.mcp.server.MCPServerSse], and [`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp] classes to connect to these servers.
+
+For example, this is how you'd use the [official MCP filesystem server](https://www.npmjs.com/package/@modelcontextprotocol/server-filesystem).
+
+```python
+from agents.run_context import RunContextWrapper
+
+async with MCPServerStdio(
+    params={
+        "command": "npx",
+        "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
+    }
+) as server:
+    # Note: In practice, you typically add the server to an Agent
+    # and let the framework handle tool listing automatically.
+    # Direct calls to list_tools() require run_context and agent parameters.
+    run_context = RunContextWrapper(context=None)
+    agent = Agent(name="test", instructions="test")
+    tools = await server.list_tools(run_context, agent)
+```
+
+## Using MCP servers
+
+MCP servers can be added to Agents. The Agents SDK will call `list_tools()` on the MCP servers each time the Agent is run. This makes the LLM aware of the MCP server's tools. When the LLM calls a tool from an MCP server, the SDK calls `call_tool()` on that server.
+
+```python
+
+agent=Agent(
+    name="Assistant",
+    instructions="Use the tools to achieve the task",
+    mcp_servers=[mcp_server_1, mcp_server_2]
+)
+```
+
+## Tool filtering
+
+You can filter which tools are available to your Agent by configuring tool filters on MCP servers. The SDK supports both static and dynamic tool filtering.
+
+### Static tool filtering
+
+For simple allow/block lists, you can use static filtering:
+
+```python
+from agents.mcp import create_static_tool_filter
+
+# Only expose specific tools from this server
+server = MCPServerStdio(
+    params={
+        "command": "npx",
+        "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
+    },
+    tool_filter=create_static_tool_filter(
+        allowed_tool_names=["read_file", "write_file"]
+    )
+)
+
+# Exclude specific tools from this server
+server = MCPServerStdio(
+    params={
+        "command": "npx",
+        "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
+    },
+    tool_filter=create_static_tool_filter(
+        blocked_tool_names=["delete_file"]
+    )
+)
+
+```
+
+**When both `allowed_tool_names` and `blocked_tool_names` are configured, the processing order is:**
+1. First apply `allowed_tool_names` (allowlist) - only keep the specified tools
+2. Then apply `blocked_tool_names` (blocklist) - exclude specified tools from the remaining tools
+
+For example, if you configure `allowed_tool_names=["read_file", "write_file", "delete_file"]` and `blocked_tool_names=["delete_file"]`, only `read_file` and `write_file` tools will be available.
+
+### Dynamic tool filtering
+
+For more complex filtering logic, you can use dynamic filters with functions:
+
+```python
+from agents.mcp import ToolFilterContext
+
+# Simple synchronous filter
+def custom_filter(context: ToolFilterContext, tool) -> bool:
+    """Example of a custom tool filter."""
+    # Filter logic based on tool name patterns
+    return tool.name.startswith("allowed_prefix")
+
+# Context-aware filter
+def context_aware_filter(context: ToolFilterContext, tool) -> bool:
+    """Filter tools based on context information."""
+    # Access agent information
+    agent_name = context.agent.name
+
+    # Access server information
+    server_name = context.server_name
+
+    # Implement your custom filtering logic here
+    return some_filtering_logic(agent_name, server_name, tool)
+
+# Asynchronous filter
+async def async_filter(context: ToolFilterContext, tool) -> bool:
+    """Example of an asynchronous filter."""
+    # Perform async operations if needed
+    result = await some_async_check(context, tool)
+    return result
+
+server = MCPServerStdio(
+    params={
+        "command": "npx",
+        "args": ["-y", "@modelcontextprotocol/server-filesystem", samples_dir],
+    },
+    tool_filter=custom_filter # or context_aware_filter or async_filter
+)
+```
+
+The `ToolFilterContext` provides access to:
+- `run_context`: The current run context
+- `agent`: The agent requesting the tools
+- `server_name`: The name of the MCP server
+
+## Caching
+
+Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be a latency hit, especially if the server is a remote server. To automatically cache the list of tools, you can pass `cache_tools_list=True` to [`MCPServerStdio`][agents.mcp.server.MCPServerStdio], [`MCPServerSse`][agents.mcp.server.MCPServerSse], and [`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp]. You should only do this if you're certain the tool list will not change.
+
+If you want to invalidate the cache, you can call `invalidate_tools_cache()` on the servers.
+
+## End-to-end examples
+
+View complete working examples at [examples/mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp).
+
+## Tracing
+
+[Tracing](./tracing.md) automatically captures MCP operations, including:
+
+1. Calls to the MCP server to list tools
+2. MCP-related info on function calls
+
+
````
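The allowlist-then-blocklist order documented in the new docs/mcp.md can be restated in plain Python; the sketch below is purely illustrative of that documented order and is not the SDK's internal implementation:

```python
from __future__ import annotations


def apply_static_tool_filter(
    tool_names: list[str],
    allowed_tool_names: list[str] | None = None,
    blocked_tool_names: list[str] | None = None,
) -> list[str]:
    """Illustrative restatement of the documented filtering order."""
    # 1) Apply the allowlist first: keep only the named tools.
    if allowed_tool_names is not None:
        tool_names = [name for name in tool_names if name in allowed_tool_names]
    # 2) Then apply the blocklist: drop any remaining tools that are blocked.
    if blocked_tool_names is not None:
        tool_names = [name for name in tool_names if name not in blocked_tool_names]
    return tool_names


# Matches the example in the docs: only read_file and write_file survive.
print(
    apply_static_tool_filter(
        ["read_file", "write_file", "delete_file", "list_dir"],
        allowed_tool_names=["read_file", "write_file", "delete_file"],
        blocked_tool_names=["delete_file"],
    )
)
# -> ['read_file', 'write_file']
```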
docs/models/index.md

````diff
@@ -93,6 +93,22 @@ english_agent = Agent(
 )
 ```
 
+Also, when you use OpenAI's Responses API, [there are a few other optional parameters](https://platform.openai.com/docs/api-reference/responses/create) (e.g., `user`, `service_tier`, and so on). If they are not available at the top level, you can use `extra_args` to pass them as well.
+
+```python
+from agents import Agent, ModelSettings
+
+english_agent = Agent(
+    name="English agent",
+    instructions="You only speak English",
+    model="gpt-4o",
+    model_settings=ModelSettings(
+        temperature=0.1,
+        extra_args={"service_tier": "flex", "user": "user_12345"},
+    ),
+)
+```
+
 ## Common issues with using other LLM providers
 
 ### Tracing client error 401
````
docs/release.md

````diff
@@ -1,4 +1,4 @@
-# Release process
+# Release process/changelog
 
 The project follows a slightly modified version of semantic versioning using the form `0.Y.Z`. The leading `0` indicates the SDK is still evolving rapidly. Increment the components as follows:
 
@@ -12,7 +12,13 @@ If you don't want breaking changes, we recommend pinning to `0.0.x` versions in
 
 We will increment `Z` for non-breaking changes:
 
--
--
--
--
+- Bug fixes
+- New features
+- Changes to private interfaces
+- Updates to beta features
+
+## Breaking change changelog
+
+### 0.1.0
+
+In this version, [`MCPServer.list_tools()`][agents.mcp.server.MCPServer] has two new params: `run_context` and `agent`. You'll need to add these params to any classes that subclass `MCPServer`.
````
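For maintainers of custom `MCPServer` subclasses, a hedged sketch of what that 0.1.0 signature change implies is shown below; the exact typing, the `_fetch_tools_somehow()` helper, and the omission of the other abstract members are assumptions for illustration, not the SDK's actual source:

```python
from typing import Any

from agents import Agent
from agents.mcp import MCPServer
from agents.run_context import RunContextWrapper
from mcp.types import Tool as MCPTool


class MyCustomMCPServer(MCPServer):
    """Sketch of a subclass updated for the 0.1.0 list_tools() change.

    Only list_tools() is shown; the other abstract members of MCPServer
    (connect, cleanup, call_tool, the name property, ...) are omitted here.
    """

    async def list_tools(
        self,
        run_context: RunContextWrapper[Any],  # new parameter in 0.1.0
        agent: Agent[Any],  # new parameter in 0.1.0
    ) -> list[MCPTool]:
        # The new parameters let the server (or a tool filter) see which agent
        # is asking and under which run context; return your tools as before.
        # _fetch_tools_somehow() is a hypothetical stand-in for your existing
        # tool-discovery logic.
        return await self._fetch_tools_somehow()
```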