openai-agents 0.0.16__tar.gz → 0.0.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {openai_agents-0.0.16 → openai_agents-0.0.18}/PKG-INFO +6 -3
- {openai_agents-0.0.16 → openai_agents-0.0.18}/README.md +3 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/mcp.md +5 -4
- openai_agents-0.0.18/docs/ja/repl.md +22 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/tools.md +4 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/tracing.md +2 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/mcp.md +4 -3
- openai_agents-0.0.18/docs/ref/repl.md +6 -0
- openai_agents-0.0.18/docs/release.md +18 -0
- openai_agents-0.0.18/docs/repl.md +19 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/tools.md +32 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/tracing.md +1 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/input_guardrails.py +1 -1
- openai_agents-0.0.18/examples/basic/prompt_template.py +79 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/hosted_mcp/approvals.py +1 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/hosted_mcp/simple.py +1 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/mkdocs.yml +4 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/pyproject.toml +3 -3
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/__init__.py +8 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/_run_impl.py +11 -5
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/agent.py +33 -3
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/agent_output.py +1 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/exceptions.py +38 -5
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/models/litellm_model.py +13 -2
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/visualization.py +35 -18
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/function_schema.py +7 -5
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/handoffs.py +3 -3
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/mcp/server.py +9 -9
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/mcp/util.py +1 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/model_settings.py +15 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/interface.py +6 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/openai_chatcompletions.py +26 -6
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/openai_responses.py +10 -0
- openai_agents-0.0.18/src/agents/prompts.py +76 -0
- openai_agents-0.0.18/src/agents/repl.py +65 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/result.py +43 -13
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/run.py +48 -8
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/stream_events.py +1 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tool.py +26 -5
- openai_agents-0.0.18/src/agents/tool_context.py +29 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/processors.py +29 -3
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/_pretty_print.py +12 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/model.py +2 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/fake_model.py +2 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/test_mcp_tracing.py +39 -21
- openai_agents-0.0.18/tests/model_settings/test_serialization.py +133 -0
- openai_agents-0.0.18/tests/models/test_kwargs_functionality.py +177 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/models/test_litellm_chatcompletions_stream.py +3 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/models/test_litellm_extra_body.py +1 -2
- openai_agents-0.0.18/tests/test_agent_prompt.py +97 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_agent_runner.py +35 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_agent_runner_streamed.py +37 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_function_schema.py +12 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_function_tool.py +59 -17
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_function_tool_decorator.py +5 -4
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_handoff_tool.py +8 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_openai_chatcompletions.py +38 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_openai_chatcompletions_stream.py +3 -0
- openai_agents-0.0.18/tests/test_repl.py +28 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_responses.py +4 -2
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_responses_tracing.py +6 -0
- openai_agents-0.0.18/tests/test_run_error_details.py +48 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_run_step_execution.py +40 -1
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_run_step_processing.py +18 -14
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_tracing_errors_streamed.py +0 -4
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_visualization.py +15 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/test_workflow.py +3 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/uv.lock +1691 -1691
- openai_agents-0.0.16/tests/model_settings/test_serialization.py +0 -59
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/ISSUE_TEMPLATE/question.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/workflows/issues.yml +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.github/workflows/tests.yml +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.gitignore +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.prettierrc +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/.vscode/settings.json +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/AGENTS.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/LICENSE +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/Makefile +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/agents.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/assets/images/graph.png +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/assets/images/mcp-tracing.jpg +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/config.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/context.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/examples.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/guardrails.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/handoffs.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/index.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/agents.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/config.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/context.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/examples.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/guardrails.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/handoffs.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/index.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/models/index.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/models/litellm.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/models.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/multi_agent.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/quickstart.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/results.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/running_agents.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/streaming.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/visualization.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/voice/pipeline.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/voice/quickstart.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ja/voice/tracing.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/models/index.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/models/litellm.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/multi_agent.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/quickstart.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/extensions/litellm.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/index.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/items.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/mcp/server.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/mcp/util.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/result.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/run.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/events.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/exceptions.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/input.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/model.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/models/openai_provider.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/models/openai_stt.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/models/openai_tts.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/pipeline.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/pipeline_config.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/result.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/utils.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/ref/voice/workflow.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/results.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/running_agents.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/scripts/translate_docs.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/streaming.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/visualization.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/voice/pipeline.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/voice/quickstart.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/docs/voice/tracing.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/forcing_tool_use.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/llm_as_a_judge.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/agent_patterns/streaming_guardrails.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/agent_lifecycle_example.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/hello_world_jupyter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/local_image.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/media/image_bison.jpg +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/non_strict_output_type.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/previous_response_id.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/remote_image.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/basic/tools.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/financials_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/planner_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/risk_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/search_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/agents/writer_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/manager.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/financial_research_agent/printer.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/handoffs/message_filter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/handoffs/message_filter_streaming.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/hosted_mcp/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/filesystem_example/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/filesystem_example/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/filesystem_example/sample_files/favorite_books.txt +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/git_example/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/git_example/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/sse_example/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/sse_example/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/sse_example/server.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/streamablehttp_example/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/streamablehttp_example/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/mcp/streamablehttp_example/server.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/model_providers/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/model_providers/custom_example_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/model_providers/custom_example_global.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/model_providers/custom_example_provider.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/model_providers/litellm_auto.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/model_providers/litellm_provider.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/agents/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/tools/code_interpreter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/tools/image_generator.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/static/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/static/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/static/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/static/util.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/streamed/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/streamed/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/streamed/main.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/examples/voice/streamed/my_workflow.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/_config.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/computer.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/models/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/extensions/models/litellm_provider.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/guardrail.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/items.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/logger.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/mcp/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/chatcmpl_converter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/chatcmpl_helpers.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/chatcmpl_stream_handler.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/multi_provider.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/models/openai_provider.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/py.typed +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/create.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/processor_interface.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/scope.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/setup.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/span_data.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/spans.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/traces.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/tracing/util.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/usage.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/_coro.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/_error_tracing.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/_json.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/_transforms.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/util/_types.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/version.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/events.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/exceptions.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/imports.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/input.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/models/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/models/openai_model_provider.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/models/openai_stt.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/models/openai_tts.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/pipeline.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/pipeline_config.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/result.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/utils.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/src/agents/voice/workflow.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/README.md +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/conftest.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/fastapi/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/fastapi/streaming_app.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/fastapi/test_streaming_context.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/conftest.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/helpers.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/test_caching.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/test_connect_disconnect.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/test_mcp_util.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/test_runner_calls_mcp.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/mcp/test_server_errors.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/models/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/models/conftest.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/models/test_map.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_agent_config.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_agent_tracing.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_cancel_streaming.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_computer_action.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_config.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_extra_headers.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_openai_chatcompletions_converter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_output_tool.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_pretty_print.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_result_cast.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_run_config.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_tool_choice_reset.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_tool_use_behavior.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_tracing.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_tracing_errors.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/test_usage.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/testing_processor.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/tracing/test_processor_api_key.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/__init__.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/conftest.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/fake_models.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/helpers.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/test_input.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/test_openai_stt.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/test_openai_tts.py +0 -0
- {openai_agents-0.0.16 → openai_agents-0.0.18}/tests/voice/test_pipeline.py +0 -0
--- openai_agents-0.0.16/PKG-INFO
+++ openai_agents-0.0.18/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.16
+Version: 0.0.18
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,8 +19,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp<2,>=1.
-Requires-Dist: openai>=1.
+Requires-Dist: mcp<2,>=1.9.4; python_version >= '3.10'
+Requires-Dist: openai>=1.87.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
@@ -40,6 +40,9 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
 
 <img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
 
+> [!NOTE]
+> Looking for the JavaScript/TypeScript version? Check out [Agents SDK JS/TS](https://github.com/openai/openai-agents-js).
+
 ### Core concepts:
 
 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
--- openai_agents-0.0.16/README.md
+++ openai_agents-0.0.18/README.md
@@ -4,6 +4,9 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
 
 <img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
 
+> [!NOTE]
+> Looking for the JavaScript/TypeScript version? Check out [Agents SDK JS/TS](https://github.com/openai/openai-agents-js).
+
 ### Core concepts:
 
 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
|
@@ -12,12 +12,13 @@ Agents SDK は MCP をサポートしており、これにより幅広い MCP
|
|
|
12
12
|
|
|
13
13
|
## MCP サーバー
|
|
14
14
|
|
|
15
|
-
現在、MCP 仕様では使用するトランスポート方式に基づき
|
|
15
|
+
現在、MCP 仕様では使用するトランスポート方式に基づき 3 種類のサーバーが定義されています。
|
|
16
16
|
|
|
17
|
-
1. **stdio** サーバー: アプリケーションのサブプロセスとして実行されます。ローカルで動かすイメージです。
|
|
17
|
+
1. **stdio** サーバー: アプリケーションのサブプロセスとして実行されます。ローカルで動かすイメージです。
|
|
18
18
|
2. **HTTP over SSE** サーバー: リモートで動作し、 URL 経由で接続します。
|
|
19
|
+
3. **Streamable HTTP** サーバー: MCP 仕様に定義された Streamable HTTP トランスポートを使用してリモートで動作します。
|
|
19
20
|
|
|
20
|
-
これらのサーバーへは [`MCPServerStdio`][agents.mcp.server.MCPServerStdio]
|
|
21
|
+
これらのサーバーへは [`MCPServerStdio`][agents.mcp.server.MCPServerStdio]、[`MCPServerSse`][agents.mcp.server.MCPServerSse]、[`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp] クラスを使用して接続できます。
|
|
21
22
|
|
|
22
23
|
たとえば、[公式 MCP filesystem サーバー](https://www.npmjs.com/package/@modelcontextprotocol/server-filesystem)を利用する場合は次のようになります。
|
|
23
24
|
|
|
@@ -46,7 +47,7 @@ agent=Agent(
|
|
|
46
47
|
|
|
47
48
|
## キャッシュ
|
|
48
49
|
|
|
49
|
-
エージェントが実行されるたびに、MCP サーバーへ `list_tools()` が呼び出されます。サーバーがリモートの場合は特にレイテンシが発生します。ツール一覧を自動でキャッシュしたい場合は、[`MCPServerStdio`][agents.mcp.server.MCPServerStdio]
|
|
50
|
+
エージェントが実行されるたびに、MCP サーバーへ `list_tools()` が呼び出されます。サーバーがリモートの場合は特にレイテンシが発生します。ツール一覧を自動でキャッシュしたい場合は、[`MCPServerStdio`][agents.mcp.server.MCPServerStdio]、[`MCPServerSse`][agents.mcp.server.MCPServerSse]、[`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp] の各クラスに `cache_tools_list=True` を渡してください。ツール一覧が変更されないと確信できる場合のみ使用してください。
|
|
50
51
|
|
|
51
52
|
キャッシュを無効化したい場合は、サーバーで `invalidate_tools_cache()` を呼び出します。
|
|
52
53
|
|
|
--- /dev/null
+++ openai_agents-0.0.18/docs/ja/repl.md
@@ -0,0 +1,22 @@
+---
+search:
+  exclude: true
+---
+# REPL ユーティリティ
+
+`run_demo_loop` を使うと、ターミナルから手軽にエージェントを試せます。
+
+```python
+import asyncio
+from agents import Agent, run_demo_loop
+
+async def main() -> None:
+    agent = Agent(name="Assistant", instructions="あなたは親切なアシスタントです")
+    await run_demo_loop(agent)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+`run_demo_loop` は入力を繰り返し受け取り、会話履歴を保持したままエージェントを実行します。既定ではストリーミング出力を表示します。
+`quit` または `exit` と入力するか `Ctrl-D` を押すと終了します。
--- openai_agents-0.0.16/docs/ja/tools.md
+++ openai_agents-0.0.18/docs/ja/tools.md
@@ -17,6 +17,10 @@ OpenAI は [`OpenAIResponsesModel`][agents.models.openai_responses.OpenAIRespons
 - [`WebSearchTool`][agents.tool.WebSearchTool] はエージェントに Web 検索を行わせます。
 - [`FileSearchTool`][agents.tool.FileSearchTool] は OpenAI ベクトルストアから情報を取得します。
 - [`ComputerTool`][agents.tool.ComputerTool] はコンピュータ操作タスクを自動化します。
+- [`CodeInterpreterTool`][agents.tool.CodeInterpreterTool] はサンドボックス環境でコードを実行します。
+- [`HostedMCPTool`][agents.tool.HostedMCPTool] はリモート MCP サーバーのツールをモデルから直接利用できるようにします。
+- [`ImageGenerationTool`][agents.tool.ImageGenerationTool] はプロンプトから画像を生成します。
+- [`LocalShellTool`][agents.tool.LocalShellTool] はローカルマシンでシェルコマンドを実行します。
 
 ```python
 from agents import Agent, FileSearchTool, Runner, WebSearchTool
--- openai_agents-0.0.16/docs/ja/tracing.md
+++ openai_agents-0.0.18/docs/ja/tracing.md
@@ -119,4 +119,5 @@
 - [Comet Opik](https://www.comet.com/docs/opik/tracing/integrations/openai_agents)
 - [Langfuse](https://langfuse.com/docs/integrations/openaiagentssdk/openai-agents)
 - [Langtrace](https://docs.langtrace.ai/supported-integrations/llm-frameworks/openai-agents-sdk)
-- [Okahu‑Monocle](https://github.com/monocle2ai/monocle)
+- [Okahu‑Monocle](https://github.com/monocle2ai/monocle)
+- [Portkey AI](https://portkey.ai/docs/integrations/agents/openai-agents)
--- openai_agents-0.0.16/docs/mcp.md
+++ openai_agents-0.0.18/docs/mcp.md
@@ -8,12 +8,13 @@ The Agents SDK has support for MCP. This enables you to use a wide range of MCP
 
 ## MCP servers
 
-Currently, the MCP spec defines
+Currently, the MCP spec defines three kinds of servers, based on the transport mechanism they use:
 
 1. **stdio** servers run as a subprocess of your application. You can think of them as running "locally".
 2. **HTTP over SSE** servers run remotely. You connect to them via a URL.
+3. **Streamable HTTP** servers run remotely using the Streamable HTTP transport defined in the MCP spec.
 
-You can use the [`MCPServerStdio`][agents.mcp.server.MCPServerStdio]
+You can use the [`MCPServerStdio`][agents.mcp.server.MCPServerStdio], [`MCPServerSse`][agents.mcp.server.MCPServerSse], and [`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp] classes to connect to these servers.
 
 For example, this is how you'd use the [official MCP filesystem server](https://www.npmjs.com/package/@modelcontextprotocol/server-filesystem).
 
@@ -42,7 +43,7 @@ agent=Agent(
 
 ## Caching
 
-Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be a latency hit, especially if the server is a remote server. To automatically cache the list of tools, you can pass `cache_tools_list=True` to
+Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be a latency hit, especially if the server is a remote server. To automatically cache the list of tools, you can pass `cache_tools_list=True` to [`MCPServerStdio`][agents.mcp.server.MCPServerStdio], [`MCPServerSse`][agents.mcp.server.MCPServerSse], and [`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp]. You should only do this if you're certain the tool list will not change.
 
 If you want to invalidate the cache, you can call `invalidate_tools_cache()` on the servers.
 
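To make the new transport concrete, here is a minimal sketch of connecting an agent to a Streamable HTTP MCP server. The diff above only names the `MCPServerStreamableHttp` class; the constructor arguments (`name`, a `params` dict with the server `url`) and the async-context-manager usage are assumptions modeled on the SDK's existing MCP server classes, so verify them against the 0.0.18 reference docs.

```python
import asyncio

from agents import Agent, Runner
from agents.mcp import MCPServerStreamableHttp


async def main() -> None:
    # Assumed constructor shape: a params dict carrying the remote server URL,
    # mirroring how the stdio/SSE server classes take their parameters.
    async with MCPServerStreamableHttp(
        name="example-server",
        params={"url": "http://localhost:8000/mcp"},
        cache_tools_list=True,  # only if you are sure the tool list won't change
    ) as server:
        agent = Agent(
            name="Assistant",
            instructions="Use the MCP tools to answer the user's questions.",
            mcp_servers=[server],
        )
        result = await Runner.run(agent, "Which tools do you have available?")
        print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())
```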
--- /dev/null
+++ openai_agents-0.0.18/docs/release.md
@@ -0,0 +1,18 @@
+# Release process
+
+The project follows a slightly modified version of semantic versioning using the form `0.Y.Z`. The leading `0` indicates the SDK is still evolving rapidly. Increment the components as follows:
+
+## Minor (`Y`) versions
+
+We will increase minor versions `Y` for **breaking changes** to any public interfaces that are not marked as beta. For example, going from `0.0.x` to `0.1.x` might include breaking changes.
+
+If you don't want breaking changes, we recommend pinning to `0.0.x` versions in your project.
+
+## Patch (`Z`) versions
+
+We will increment `Z` for non-breaking changes:
+
+- Bug fixes
+- New features
+- Changes to private interfaces
+- Updates to beta features
--- /dev/null
+++ openai_agents-0.0.18/docs/repl.md
@@ -0,0 +1,19 @@
+# REPL utility
+
+The SDK provides `run_demo_loop` for quick interactive testing.
+
+```python
+import asyncio
+from agents import Agent, run_demo_loop
+
+async def main() -> None:
+    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
+    await run_demo_loop(agent)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+`run_demo_loop` prompts for user input in a loop, keeping the conversation
+history between turns. By default it streams model output as it is produced.
+Type `quit` or `exit` (or press `Ctrl-D`) to leave the loop.
--- openai_agents-0.0.16/docs/tools.md
+++ openai_agents-0.0.18/docs/tools.md
@@ -13,6 +13,10 @@ OpenAI offers a few built-in tools when using the [`OpenAIResponsesModel`][agent
 - The [`WebSearchTool`][agents.tool.WebSearchTool] lets an agent search the web.
 - The [`FileSearchTool`][agents.tool.FileSearchTool] allows retrieving information from your OpenAI Vector Stores.
 - The [`ComputerTool`][agents.tool.ComputerTool] allows automating computer use tasks.
+- The [`CodeInterpreterTool`][agents.tool.CodeInterpreterTool] lets the LLM execute code in a sandboxed environment.
+- The [`HostedMCPTool`][agents.tool.HostedMCPTool] exposes a remote MCP server's tools to the model.
+- The [`ImageGenerationTool`][agents.tool.ImageGenerationTool] generates images from a prompt.
+- The [`LocalShellTool`][agents.tool.LocalShellTool] runs shell commands on your machine.
 
 ```python
 from agents import Agent, FileSearchTool, Runner, WebSearchTool
@@ -266,7 +270,7 @@ The `agent.as_tool` function is a convenience method to make it easy to turn an
 ```python
 @function_tool
 async def run_my_agent() -> str:
-
+    """A tool that runs the agent with custom configs"""
 
     agent = Agent(name="My agent", instructions="...")
 
@@ -280,6 +284,33 @@ async def run_my_agent() -> str:
     return str(result.final_output)
 ```
 
+### Custom output extraction
+
+In certain cases, you might want to modify the output of the tool-agents before returning it to the central agent. This may be useful if you want to:
+
+- Extract a specific piece of information (e.g., a JSON payload) from the sub-agent's chat history.
+- Convert or reformat the agent’s final answer (e.g., transform Markdown into plain text or CSV).
+- Validate the output or provide a fallback value when the agent’s response is missing or malformed.
+
+You can do this by supplying the `custom_output_extractor` argument to the `as_tool` method:
+
+```python
+async def extract_json_payload(run_result: RunResult) -> str:
+    # Scan the agent’s outputs in reverse order until we find a JSON-like message from a tool call.
+    for item in reversed(run_result.new_items):
+        if isinstance(item, ToolCallOutputItem) and item.output.strip().startswith("{"):
+            return item.output.strip()
+    # Fallback to an empty JSON object if nothing was found
+    return "{}"
+
+
+json_tool = data_agent.as_tool(
+    tool_name="get_data_json",
+    tool_description="Run the data agent and return only its JSON payload",
+    custom_output_extractor=extract_json_payload,
+)
+```
+
 ## Handling errors in function tools
 
 When you create a function tool via `@function_tool`, you can pass a `failure_error_function`. This is a function that provides an error response to the LLM in case the tool call crashes.
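Since the hunk ends on the `failure_error_function` hook, a short sketch of how it is typically wired may help. The callback signature used here (run context plus the raised exception, returning the string shown to the LLM) is an assumption to confirm against the SDK reference.

```python
from typing import Any

from agents import Agent, RunContextWrapper, function_tool


def friendly_error(ctx: RunContextWrapper[Any], error: Exception) -> str:
    # The returned string is sent back to the model instead of crashing the run.
    return f"The tool failed ({error}). Please retry with different arguments."


@function_tool(failure_error_function=friendly_error)
def divide(a: float, b: float) -> float:
    """Divide a by b."""
    return a / b


agent = Agent(name="Math agent", instructions="Use the divide tool.", tools=[divide])
```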
--- openai_agents-0.0.16/docs/tracing.md
+++ openai_agents-0.0.18/docs/tracing.md
@@ -116,3 +116,4 @@ To customize this default setup, to send traces to alternative or additional bac
 - [Langtrace](https://docs.langtrace.ai/supported-integrations/llm-frameworks/openai-agents-sdk)
 - [Okahu-Monocle](https://github.com/monocle2ai/monocle)
 - [Galileo](https://v2docs.galileo.ai/integrations/openai-agent-integration#openai-agent-integration)
+- [Portkey AI](https://portkey.ai/docs/integrations/agents/openai-agents)
--- openai_agents-0.0.16/examples/agent_patterns/input_guardrails.py
+++ openai_agents-0.0.18/examples/agent_patterns/input_guardrails.py
@@ -20,7 +20,7 @@ This example shows how to use guardrails.
 Guardrails are checks that run in parallel to the agent's execution.
 They can be used to do things like:
 - Check if input messages are off-topic
-- Check that
+- Check that input messages don't violate any policies
 - Take over control of the agent's execution if an unexpected input is detected
 
 In this example, we'll setup an input guardrail that trips if the user is asking to do math homework.
--- /dev/null
+++ openai_agents-0.0.18/examples/basic/prompt_template.py
@@ -0,0 +1,79 @@
+import argparse
+import asyncio
+import random
+
+from agents import Agent, GenerateDynamicPromptData, Runner
+
+"""
+NOTE: This example will not work out of the box, because the default prompt ID will not be available
+in your project.
+
+To use it, please:
+1. Go to https://platform.openai.com/playground/prompts
+2. Create a new prompt variable, `poem_style`.
+3. Create a system prompt with the content:
+```
+Write a poem in {{poem_style}}
+```
+4. Run the example with the `--prompt-id` flag.
+"""
+
+DEFAULT_PROMPT_ID = "pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b"
+
+
+class DynamicContext:
+    def __init__(self, prompt_id: str):
+        self.prompt_id = prompt_id
+        self.poem_style = random.choice(["limerick", "haiku", "ballad"])
+        print(f"[debug] DynamicContext initialized with poem_style: {self.poem_style}")
+
+
+async def _get_dynamic_prompt(data: GenerateDynamicPromptData):
+    ctx: DynamicContext = data.context.context
+    return {
+        "id": ctx.prompt_id,
+        "version": "1",
+        "variables": {
+            "poem_style": ctx.poem_style,
+        },
+    }
+
+
+async def dynamic_prompt(prompt_id: str):
+    context = DynamicContext(prompt_id)
+
+    agent = Agent(
+        name="Assistant",
+        prompt=_get_dynamic_prompt,
+    )
+
+    result = await Runner.run(agent, "Tell me about recursion in programming.", context=context)
+    print(result.final_output)
+
+
+async def static_prompt(prompt_id: str):
+    agent = Agent(
+        name="Assistant",
+        prompt={
+            "id": prompt_id,
+            "version": "1",
+            "variables": {
+                "poem_style": "limerick",
+            },
+        },
+    )
+
+    result = await Runner.run(agent, "Tell me about recursion in programming.")
+    print(result.final_output)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dynamic", action="store_true")
+    parser.add_argument("--prompt-id", type=str, default=DEFAULT_PROMPT_ID)
+    args = parser.parse_args()
+
+    if args.dynamic:
+        asyncio.run(dynamic_prompt(args.prompt_id))
+    else:
+        asyncio.run(static_prompt(args.prompt_id))
--- openai_agents-0.0.16/mkdocs.yml
+++ openai_agents-0.0.18/mkdocs.yml
@@ -59,6 +59,7 @@ plugins:
 - running_agents.md
 - results.md
 - streaming.md
+- repl.md
 - tools.md
 - mcp.md
 - handoffs.md
@@ -71,6 +72,7 @@ plugins:
 - models/litellm.md
 - config.md
 - visualization.md
+- release.md
 - Voice agents:
 - voice/quickstart.md
 - voice/pipeline.md
@@ -80,6 +82,7 @@ plugins:
 - ref/index.md
 - ref/agent.md
 - ref/run.md
+- ref/repl.md
 - ref/tool.md
 - ref/result.md
 - ref/stream_events.md
@@ -139,6 +142,7 @@ plugins:
 - running_agents.md
 - results.md
 - streaming.md
+- repl.md
 - tools.md
 - mcp.md
 - handoffs.md
--- openai_agents-0.0.16/pyproject.toml
+++ openai_agents-0.0.18/pyproject.toml
@@ -1,19 +1,19 @@
 [project]
 name = "openai-agents"
-version = "0.0.16"
+version = "0.0.18"
 description = "OpenAI Agents SDK"
 readme = "README.md"
 requires-python = ">=3.9"
 license = "MIT"
 authors = [{ name = "OpenAI", email = "support@openai.com" }]
 dependencies = [
-    "openai>=1.
+    "openai>=1.87.0",
     "pydantic>=2.10, <3",
     "griffe>=1.5.6, <2",
     "typing-extensions>=4.12.2, <5",
     "requests>=2.0, <3",
     "types-requests>=2.0, <3",
-    "mcp>=1.
+    "mcp>=1.9.4, <2; python_version >= '3.10'",
 ]
 classifiers = [
     "Typing :: Typed",
--- openai_agents-0.0.16/src/agents/__init__.py
+++ openai_agents-0.0.18/src/agents/__init__.py
@@ -14,6 +14,7 @@ from .exceptions import (
     MaxTurnsExceeded,
     ModelBehaviorError,
     OutputGuardrailTripwireTriggered,
+    RunErrorDetails,
     UserError,
 )
 from .guardrail import (
@@ -44,6 +45,8 @@ from .models.interface import Model, ModelProvider, ModelTracing
 from .models.openai_chatcompletions import OpenAIChatCompletionsModel
 from .models.openai_provider import OpenAIProvider
 from .models.openai_responses import OpenAIResponsesModel
+from .prompts import DynamicPromptFunction, GenerateDynamicPromptData, Prompt
+from .repl import run_demo_loop
 from .result import RunResult, RunResultStreaming
 from .run import RunConfig, Runner
 from .run_context import RunContextWrapper, TContext
@@ -159,6 +162,7 @@ __all__ = [
     "ToolsToFinalOutputFunction",
     "ToolsToFinalOutputResult",
     "Runner",
+    "run_demo_loop",
     "Model",
     "ModelProvider",
     "ModelTracing",
@@ -175,6 +179,9 @@ __all__ = [
     "AgentsException",
     "InputGuardrailTripwireTriggered",
     "OutputGuardrailTripwireTriggered",
+    "DynamicPromptFunction",
+    "GenerateDynamicPromptData",
+    "Prompt",
     "MaxTurnsExceeded",
     "ModelBehaviorError",
     "UserError",
@@ -204,6 +211,7 @@ __all__ = [
     "AgentHooks",
     "RunContextWrapper",
     "TContext",
+    "RunErrorDetails",
     "RunResult",
     "RunResultStreaming",
     "RunConfig",
--- openai_agents-0.0.16/src/agents/_run_impl.py
+++ openai_agents-0.0.18/src/agents/_run_impl.py
@@ -33,6 +33,7 @@ from openai.types.responses.response_output_item import (
     ImageGenerationCall,
     LocalShellCall,
     McpApprovalRequest,
+    McpCall,
     McpListTools,
 )
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem
@@ -74,6 +75,7 @@ from .tool import (
     MCPToolApprovalRequest,
     Tool,
 )
+from .tool_context import ToolContext
 from .tracing import (
     SpanError,
     Trace,
@@ -456,6 +458,9 @@ class RunImpl:
             )
         elif isinstance(output, McpListTools):
             items.append(MCPListToolsItem(raw_item=output, agent=agent))
+        elif isinstance(output, McpCall):
+            items.append(ToolCallItem(raw_item=output, agent=agent))
+            tools_used.append("mcp")
         elif isinstance(output, ImageGenerationCall):
             items.append(ToolCallItem(raw_item=output, agent=agent))
             tools_used.append("image_generation")
@@ -539,23 +544,24 @@ class RunImpl:
         func_tool: FunctionTool, tool_call: ResponseFunctionToolCall
     ) -> Any:
         with function_span(func_tool.name) as span_fn:
+            tool_context = ToolContext.from_agent_context(context_wrapper, tool_call.call_id)
             if config.trace_include_sensitive_data:
                 span_fn.span_data.input = tool_call.arguments
             try:
                 _, _, result = await asyncio.gather(
-                    hooks.on_tool_start(
+                    hooks.on_tool_start(tool_context, agent, func_tool),
                     (
-                        agent.hooks.on_tool_start(
+                        agent.hooks.on_tool_start(tool_context, agent, func_tool)
                         if agent.hooks
                         else _coro.noop_coroutine()
                     ),
-                    func_tool.on_invoke_tool(
+                    func_tool.on_invoke_tool(tool_context, tool_call.arguments),
                 )
 
                 await asyncio.gather(
-                    hooks.on_tool_end(
+                    hooks.on_tool_end(tool_context, agent, func_tool, result),
                     (
-                        agent.hooks.on_tool_end(
+                        agent.hooks.on_tool_end(tool_context, agent, func_tool, result)
                         if agent.hooks
                         else _coro.noop_coroutine()
                     ),
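For orientation: the change above means a hand-rolled `FunctionTool` now has its `on_invoke_tool` callback invoked with a `ToolContext` (built via `ToolContext.from_agent_context(context_wrapper, tool_call.call_id)`) rather than a bare `RunContextWrapper`. The sketch below assumes `ToolContext` is importable from `agents.tool_context` and carries the originating call id; check the new `src/agents/tool_context.py` for the actual fields.

```python
import json

from agents import Agent, FunctionTool
from agents.tool_context import ToolContext


async def on_invoke_add(ctx: ToolContext, arguments: str) -> str:
    args = json.loads(arguments)
    # Assumed attribute: the tool call id captured by ToolContext.from_agent_context(...).
    print(f"add() invoked for tool call {getattr(ctx, 'tool_call_id', '<unknown>')}")
    return str(args["a"] + args["b"])


add_tool = FunctionTool(
    name="add",
    description="Add two integers.",
    params_json_schema={
        "type": "object",
        "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
        "required": ["a", "b"],
        "additionalProperties": False,
    },
    on_invoke_tool=on_invoke_add,
)

agent = Agent(name="Calculator", instructions="Use the add tool.", tools=[add_tool])
```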
--- openai_agents-0.0.16/src/agents/agent.py
+++ openai_agents-0.0.18/src/agents/agent.py
@@ -1,11 +1,13 @@
 from __future__ import annotations
 
+import asyncio
 import dataclasses
 import inspect
 from collections.abc import Awaitable
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast
 
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from typing_extensions import NotRequired, TypeAlias, TypedDict
 
 from .agent_output import AgentOutputSchemaBase
@@ -16,8 +18,9 @@ from .logger import logger
 from .mcp import MCPUtil
 from .model_settings import ModelSettings
 from .models.interface import Model
+from .prompts import DynamicPromptFunction, Prompt, PromptUtil
 from .run_context import RunContextWrapper, TContext
-from .tool import FunctionToolResult, Tool, function_tool
+from .tool import FunctionTool, FunctionToolResult, Tool, function_tool
 from .util import _transforms
 from .util._types import MaybeAwaitable
 
@@ -94,6 +97,12 @@ class Agent(Generic[TContext]):
     return a string.
     """
 
+    prompt: Prompt | DynamicPromptFunction | None = None
+    """A prompt object (or a function that returns a Prompt). Prompts allow you to dynamically
+    configure the instructions, tools and other config for an agent outside of your code. Only
+    usable with OpenAI models, using the Responses API.
+    """
+
     handoff_description: str | None = None
     """A description of the agent. This is used when the agent is used as a handoff, so that an
     LLM knows what it does and when to invoke it.
@@ -241,12 +250,33 @@ class Agent(Generic[TContext]):
 
         return None
 
+    async def get_prompt(
+        self, run_context: RunContextWrapper[TContext]
+    ) -> ResponsePromptParam | None:
+        """Get the prompt for the agent."""
+        return await PromptUtil.to_model_input(self.prompt, run_context, self)
+
     async def get_mcp_tools(self) -> list[Tool]:
         """Fetches the available tools from the MCP servers."""
         convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
         return await MCPUtil.get_all_function_tools(self.mcp_servers, convert_schemas_to_strict)
 
-    async def get_all_tools(self) -> list[Tool]:
+    async def get_all_tools(self, run_context: RunContextWrapper[Any]) -> list[Tool]:
        """All agent tools, including MCP tools and function tools."""
        mcp_tools = await self.get_mcp_tools()
-
+
+        async def _check_tool_enabled(tool: Tool) -> bool:
+            if not isinstance(tool, FunctionTool):
+                return True
+
+            attr = tool.is_enabled
+            if isinstance(attr, bool):
+                return attr
+            res = attr(run_context, self)
+            if inspect.isawaitable(res):
+                return bool(await res)
+            return bool(res)
+
+        results = await asyncio.gather(*(_check_tool_enabled(t) for t in self.tools))
+        enabled: list[Tool] = [t for t, ok in zip(self.tools, results) if ok]
+        return [*mcp_tools, *enabled]
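The new `_check_tool_enabled` helper above filters function tools on an `is_enabled` attribute that may be a bool or a (sync or async) callable receiving the run context and the agent. Below is a sketch of how a tool author might use this; exposing `is_enabled` through the `@function_tool` decorator is an assumption based on the companion `src/agents/tool.py` change in this release (+26 -5), so confirm the parameter name there.

```python
from typing import Any

from agents import Agent, RunContextWrapper, function_tool


def only_for_admins(ctx: RunContextWrapper[Any], agent: Agent[Any]) -> bool:
    # Enable the tool only when the run context object says the caller is an admin.
    return bool(getattr(ctx.context, "is_admin", False))


@function_tool(is_enabled=only_for_admins)  # assumed decorator parameter
def delete_account(user_id: str) -> str:
    """Admin-only: delete a user account."""
    return f"deleted {user_id}"


@function_tool(is_enabled=False)  # statically disabled; filtered out by get_all_tools()
def legacy_tool() -> str:
    """Kept for reference but never offered to the model."""
    return "unused"


agent = Agent(name="Ops agent", tools=[delete_account, legacy_tool])
```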
--- openai_agents-0.0.16/src/agents/agent_output.py
+++ openai_agents-0.0.18/src/agents/agent_output.py
@@ -38,7 +38,7 @@ class AgentOutputSchemaBase(abc.ABC):
     @abc.abstractmethod
     def is_strict_json_schema(self) -> bool:
         """Whether the JSON schema is in strict mode. Strict mode constrains the JSON schema
-        features, but guarantees
+        features, but guarantees valid JSON. See here for details:
         https://platform.openai.com/docs/guides/structured-outputs#supported-schemas
         """
         pass