openai-agents 0.0.17__tar.gz → 0.0.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic. Click here for more details.
- {openai_agents-0.0.17 → openai_agents-0.0.19}/PKG-INFO +6 -3
- {openai_agents-0.0.17 → openai_agents-0.0.19}/README.md +3 -0
- openai_agents-0.0.19/docs/ja/repl.md +22 -0
- openai_agents-0.0.19/docs/ref/repl.md +6 -0
- openai_agents-0.0.19/docs/release.md +18 -0
- openai_agents-0.0.19/docs/repl.md +19 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/tools.md +27 -0
- openai_agents-0.0.19/examples/basic/prompt_template.py +79 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/mkdocs.yml +4 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/pyproject.toml +3 -3
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/__init__.py +8 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/_run_impl.py +7 -5
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/agent.py +14 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/models/litellm_model.py +11 -1
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/function_schema.py +7 -5
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/handoffs.py +2 -2
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/mcp/server.py +4 -4
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/model_settings.py +15 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/interface.py +6 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/openai_chatcompletions.py +9 -1
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/openai_responses.py +10 -0
- openai_agents-0.0.19/src/agents/prompts.py +76 -0
- openai_agents-0.0.19/src/agents/repl.py +65 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/run.py +221 -97
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tool.py +10 -4
- openai_agents-0.0.19/src/agents/tool_context.py +29 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/__init__.py +11 -5
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/create.py +16 -16
- openai_agents-0.0.17/src/agents/tracing/setup.py → openai_agents-0.0.19/src/agents/tracing/provider.py +88 -8
- openai_agents-0.0.19/src/agents/tracing/setup.py +21 -0
- openai_agents-0.0.19/src/agents/tracing/util.py +21 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/conftest.py +8 -2
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/fake_model.py +2 -0
- openai_agents-0.0.19/tests/model_settings/test_serialization.py +133 -0
- openai_agents-0.0.19/tests/models/test_kwargs_functionality.py +177 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/models/test_litellm_chatcompletions_stream.py +3 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/models/test_map.py +5 -4
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_agent_config.py +6 -5
- openai_agents-0.0.19/tests/test_agent_prompt.py +97 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_function_schema.py +12 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_function_tool.py +17 -16
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_function_tool_decorator.py +5 -4
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_handoff_tool.py +13 -5
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_openai_chatcompletions.py +4 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_openai_chatcompletions_stream.py +3 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_output_tool.py +8 -8
- openai_agents-0.0.19/tests/test_repl.py +28 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_responses.py +4 -2
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_responses_tracing.py +6 -0
- openai_agents-0.0.19/tests/test_run.py +26 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_run_config.py +1 -1
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_run_step_execution.py +42 -3
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_run_step_processing.py +6 -6
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/test_workflow.py +6 -2
- {openai_agents-0.0.17 → openai_agents-0.0.19}/uv.lock +1691 -1691
- openai_agents-0.0.17/src/agents/tracing/util.py +0 -22
- openai_agents-0.0.17/tests/model_settings/test_serialization.py +0 -59
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/ISSUE_TEMPLATE/question.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/workflows/issues.yml +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.github/workflows/tests.yml +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.gitignore +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.prettierrc +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/.vscode/settings.json +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/AGENTS.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/LICENSE +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/Makefile +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/agents.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/assets/images/graph.png +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/assets/images/mcp-tracing.jpg +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/config.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/context.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/examples.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/guardrails.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/handoffs.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/index.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/agents.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/config.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/context.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/examples.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/guardrails.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/handoffs.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/index.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/mcp.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/models/index.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/models/litellm.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/models.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/multi_agent.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/quickstart.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/results.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/running_agents.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/streaming.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/tools.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/tracing.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/visualization.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/voice/pipeline.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/voice/quickstart.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ja/voice/tracing.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/mcp.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/models/index.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/models/litellm.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/multi_agent.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/quickstart.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/extensions/litellm.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/index.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/items.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/mcp/server.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/mcp/util.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/result.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/run.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/events.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/exceptions.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/input.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/model.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/models/openai_provider.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/models/openai_stt.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/models/openai_tts.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/pipeline.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/pipeline_config.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/result.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/utils.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/ref/voice/workflow.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/results.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/running_agents.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/scripts/translate_docs.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/streaming.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/tracing.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/visualization.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/voice/pipeline.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/voice/quickstart.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/docs/voice/tracing.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/forcing_tool_use.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/input_guardrails.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/llm_as_a_judge.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/agent_patterns/streaming_guardrails.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/agent_lifecycle_example.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/hello_world_jupyter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/local_image.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/media/image_bison.jpg +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/non_strict_output_type.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/previous_response_id.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/remote_image.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/basic/tools.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/financials_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/planner_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/risk_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/search_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/agents/writer_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/manager.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/financial_research_agent/printer.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/handoffs/message_filter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/handoffs/message_filter_streaming.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/hosted_mcp/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/hosted_mcp/approvals.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/hosted_mcp/simple.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/filesystem_example/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/filesystem_example/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/filesystem_example/sample_files/favorite_books.txt +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/git_example/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/git_example/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/sse_example/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/sse_example/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/sse_example/server.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/streamablehttp_example/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/streamablehttp_example/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/mcp/streamablehttp_example/server.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/model_providers/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/model_providers/custom_example_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/model_providers/custom_example_global.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/model_providers/custom_example_provider.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/model_providers/litellm_auto.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/model_providers/litellm_provider.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/agents/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/tools/code_interpreter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/tools/image_generator.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/static/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/static/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/static/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/static/util.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/streamed/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/streamed/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/streamed/main.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/examples/voice/streamed/my_workflow.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/_config.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/agent_output.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/computer.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/exceptions.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/models/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/models/litellm_provider.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/extensions/visualization.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/guardrail.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/items.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/logger.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/mcp/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/mcp/util.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/chatcmpl_converter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/chatcmpl_helpers.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/chatcmpl_stream_handler.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/multi_provider.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/models/openai_provider.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/py.typed +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/result.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/stream_events.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/processor_interface.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/processors.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/scope.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/span_data.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/spans.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/tracing/traces.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/usage.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/_coro.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/_error_tracing.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/_json.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/_pretty_print.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/_transforms.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/util/_types.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/version.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/events.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/exceptions.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/imports.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/input.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/model.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/models/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/models/openai_model_provider.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/models/openai_stt.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/models/openai_tts.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/pipeline.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/pipeline_config.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/result.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/utils.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/src/agents/voice/workflow.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/README.md +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/fastapi/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/fastapi/streaming_app.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/fastapi/test_streaming_context.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/conftest.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/helpers.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/test_caching.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/test_connect_disconnect.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/test_mcp_tracing.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/test_mcp_util.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/test_runner_calls_mcp.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/mcp/test_server_errors.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/models/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/models/conftest.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/models/test_litellm_extra_body.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_agent_runner.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_agent_runner_streamed.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_agent_tracing.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_cancel_streaming.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_computer_action.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_config.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_extra_headers.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_openai_chatcompletions_converter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_pretty_print.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_result_cast.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_run_error_details.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_tool_choice_reset.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_tool_use_behavior.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_tracing.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_tracing_errors.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_tracing_errors_streamed.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_usage.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/test_visualization.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/testing_processor.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/tracing/test_processor_api_key.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/__init__.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/conftest.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/fake_models.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/helpers.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/test_input.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/test_openai_stt.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/test_openai_tts.py +0 -0
- {openai_agents-0.0.17 → openai_agents-0.0.19}/tests/voice/test_pipeline.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: openai-agents
|
|
3
|
-
Version: 0.0.17
|
|
3
|
+
Version: 0.0.19
|
|
4
4
|
Summary: OpenAI Agents SDK
|
|
5
5
|
Project-URL: Homepage, https://github.com/openai/openai-agents-python
|
|
6
6
|
Project-URL: Repository, https://github.com/openai/openai-agents-python
|
|
@@ -19,8 +19,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
|
19
19
|
Classifier: Typing :: Typed
|
|
20
20
|
Requires-Python: >=3.9
|
|
21
21
|
Requires-Dist: griffe<2,>=1.5.6
|
|
22
|
-
Requires-Dist: mcp<2,>=1.
|
|
23
|
-
Requires-Dist: openai>=1.
|
|
22
|
+
Requires-Dist: mcp<2,>=1.9.4; python_version >= '3.10'
|
|
23
|
+
Requires-Dist: openai>=1.87.0
|
|
24
24
|
Requires-Dist: pydantic<3,>=2.10
|
|
25
25
|
Requires-Dist: requests<3,>=2.0
|
|
26
26
|
Requires-Dist: types-requests<3,>=2.0
|
|
@@ -40,6 +40,9 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
|
|
|
40
40
|
|
|
41
41
|
<img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
|
|
42
42
|
|
|
43
|
+
> [!NOTE]
|
|
44
|
+
> Looking for the JavaScript/TypeScript version? Check out [Agents SDK JS/TS](https://github.com/openai/openai-agents-js).
|
|
45
|
+
|
|
43
46
|
### Core concepts:
|
|
44
47
|
|
|
45
48
|
1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
|
|
@@ -4,6 +4,9 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
|
|
|
4
4
|
|
|
5
5
|
<img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
|
|
6
6
|
|
|
7
|
+
> [!NOTE]
|
|
8
|
+
> Looking for the JavaScript/TypeScript version? Check out [Agents SDK JS/TS](https://github.com/openai/openai-agents-js).
|
|
9
|
+
|
|
7
10
|
### Core concepts:
|
|
8
11
|
|
|
9
12
|
1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
---
|
|
2
|
+
search:
|
|
3
|
+
exclude: true
|
|
4
|
+
---
|
|
5
|
+
# REPL ユーティリティ
|
|
6
|
+
|
|
7
|
+
`run_demo_loop` を使うと、ターミナルから手軽にエージェントを試せます。
|
|
8
|
+
|
|
9
|
+
```python
|
|
10
|
+
import asyncio
|
|
11
|
+
from agents import Agent, run_demo_loop
|
|
12
|
+
|
|
13
|
+
async def main() -> None:
|
|
14
|
+
agent = Agent(name="Assistant", instructions="あなたは親切なアシスタントです")
|
|
15
|
+
await run_demo_loop(agent)
|
|
16
|
+
|
|
17
|
+
if __name__ == "__main__":
|
|
18
|
+
asyncio.run(main())
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
`run_demo_loop` は入力を繰り返し受け取り、会話履歴を保持したままエージェントを実行します。既定ではストリーミング出力を表示します。
|
|
22
|
+
`quit` または `exit` と入力するか `Ctrl-D` を押すと終了します。
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# Release process
|
|
2
|
+
|
|
3
|
+
The project follows a slightly modified version of semantic versioning using the form `0.Y.Z`. The leading `0` indicates the SDK is still evolving rapidly. Increment the components as follows:
|
|
4
|
+
|
|
5
|
+
## Minor (`Y`) versions
|
|
6
|
+
|
|
7
|
+
We will increase minor versions `Y` for **breaking changes** to any public interfaces that are not marked as beta. For example, going from `0.0.x` to `0.1.x` might include breaking changes.
|
|
8
|
+
|
|
9
|
+
If you don't want breaking changes, we recommend pinning to `0.0.x` versions in your project.
|
|
10
|
+
|
|
11
|
+
## Patch (`Z`) versions
|
|
12
|
+
|
|
13
|
+
We will increment `Z` for non-breaking changes:
|
|
14
|
+
|
|
15
|
+
- Bug fixes
|
|
16
|
+
- New features
|
|
17
|
+
- Changes to private interfaces
|
|
18
|
+
- Updates to beta features
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# REPL utility
|
|
2
|
+
|
|
3
|
+
The SDK provides `run_demo_loop` for quick interactive testing.
|
|
4
|
+
|
|
5
|
+
```python
|
|
6
|
+
import asyncio
|
|
7
|
+
from agents import Agent, run_demo_loop
|
|
8
|
+
|
|
9
|
+
async def main() -> None:
|
|
10
|
+
agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
|
|
11
|
+
await run_demo_loop(agent)
|
|
12
|
+
|
|
13
|
+
if __name__ == "__main__":
|
|
14
|
+
asyncio.run(main())
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
`run_demo_loop` prompts for user input in a loop, keeping the conversation
|
|
18
|
+
history between turns. By default it streams model output as it is produced.
|
|
19
|
+
Type `quit` or `exit` (or press `Ctrl-D`) to leave the loop.
|
|
@@ -284,6 +284,33 @@ async def run_my_agent() -> str:
|
|
|
284
284
|
return str(result.final_output)
|
|
285
285
|
```
|
|
286
286
|
|
|
287
|
+
### Custom output extraction
|
|
288
|
+
|
|
289
|
+
In certain cases, you might want to modify the output of the tool-agents before returning it to the central agent. This may be useful if you want to:
|
|
290
|
+
|
|
291
|
+
- Extract a specific piece of information (e.g., a JSON payload) from the sub-agent's chat history.
|
|
292
|
+
- Convert or reformat the agent’s final answer (e.g., transform Markdown into plain text or CSV).
|
|
293
|
+
- Validate the output or provide a fallback value when the agent’s response is missing or malformed.
|
|
294
|
+
|
|
295
|
+
You can do this by supplying the `custom_output_extractor` argument to the `as_tool` method:
|
|
296
|
+
|
|
297
|
+
```python
|
|
298
|
+
async def extract_json_payload(run_result: RunResult) -> str:
|
|
299
|
+
# Scan the agent’s outputs in reverse order until we find a JSON-like message from a tool call.
|
|
300
|
+
for item in reversed(run_result.new_items):
|
|
301
|
+
if isinstance(item, ToolCallOutputItem) and item.output.strip().startswith("{"):
|
|
302
|
+
return item.output.strip()
|
|
303
|
+
# Fallback to an empty JSON object if nothing was found
|
|
304
|
+
return "{}"
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
json_tool = data_agent.as_tool(
|
|
308
|
+
tool_name="get_data_json",
|
|
309
|
+
tool_description="Run the data agent and return only its JSON payload",
|
|
310
|
+
custom_output_extractor=extract_json_payload,
|
|
311
|
+
)
|
|
312
|
+
```
|
|
313
|
+
|
|
287
314
|
## Handling errors in function tools
|
|
288
315
|
|
|
289
316
|
When you create a function tool via `@function_tool`, you can pass a `failure_error_function`. This is a function that provides an error response to the LLM in case the tool call crashes.
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import asyncio
|
|
3
|
+
import random
|
|
4
|
+
|
|
5
|
+
from agents import Agent, GenerateDynamicPromptData, Runner
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
NOTE: This example will not work out of the box, because the default prompt ID will not be available
|
|
9
|
+
in your project.
|
|
10
|
+
|
|
11
|
+
To use it, please:
|
|
12
|
+
1. Go to https://platform.openai.com/playground/prompts
|
|
13
|
+
2. Create a new prompt variable, `poem_style`.
|
|
14
|
+
3. Create a system prompt with the content:
|
|
15
|
+
```
|
|
16
|
+
Write a poem in {{poem_style}}
|
|
17
|
+
```
|
|
18
|
+
4. Run the example with the `--prompt-id` flag.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
DEFAULT_PROMPT_ID = "pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class DynamicContext:
|
|
25
|
+
def __init__(self, prompt_id: str):
|
|
26
|
+
self.prompt_id = prompt_id
|
|
27
|
+
self.poem_style = random.choice(["limerick", "haiku", "ballad"])
|
|
28
|
+
print(f"[debug] DynamicContext initialized with poem_style: {self.poem_style}")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
async def _get_dynamic_prompt(data: GenerateDynamicPromptData):
|
|
32
|
+
ctx: DynamicContext = data.context.context
|
|
33
|
+
return {
|
|
34
|
+
"id": ctx.prompt_id,
|
|
35
|
+
"version": "1",
|
|
36
|
+
"variables": {
|
|
37
|
+
"poem_style": ctx.poem_style,
|
|
38
|
+
},
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
async def dynamic_prompt(prompt_id: str):
|
|
43
|
+
context = DynamicContext(prompt_id)
|
|
44
|
+
|
|
45
|
+
agent = Agent(
|
|
46
|
+
name="Assistant",
|
|
47
|
+
prompt=_get_dynamic_prompt,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
result = await Runner.run(agent, "Tell me about recursion in programming.", context=context)
|
|
51
|
+
print(result.final_output)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
async def static_prompt(prompt_id: str):
|
|
55
|
+
agent = Agent(
|
|
56
|
+
name="Assistant",
|
|
57
|
+
prompt={
|
|
58
|
+
"id": prompt_id,
|
|
59
|
+
"version": "1",
|
|
60
|
+
"variables": {
|
|
61
|
+
"poem_style": "limerick",
|
|
62
|
+
},
|
|
63
|
+
},
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
result = await Runner.run(agent, "Tell me about recursion in programming.")
|
|
67
|
+
print(result.final_output)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
if __name__ == "__main__":
|
|
71
|
+
parser = argparse.ArgumentParser()
|
|
72
|
+
parser.add_argument("--dynamic", action="store_true")
|
|
73
|
+
parser.add_argument("--prompt-id", type=str, default=DEFAULT_PROMPT_ID)
|
|
74
|
+
args = parser.parse_args()
|
|
75
|
+
|
|
76
|
+
if args.dynamic:
|
|
77
|
+
asyncio.run(dynamic_prompt(args.prompt_id))
|
|
78
|
+
else:
|
|
79
|
+
asyncio.run(static_prompt(args.prompt_id))
|
|
@@ -59,6 +59,7 @@ plugins:
|
|
|
59
59
|
- running_agents.md
|
|
60
60
|
- results.md
|
|
61
61
|
- streaming.md
|
|
62
|
+
- repl.md
|
|
62
63
|
- tools.md
|
|
63
64
|
- mcp.md
|
|
64
65
|
- handoffs.md
|
|
@@ -71,6 +72,7 @@ plugins:
|
|
|
71
72
|
- models/litellm.md
|
|
72
73
|
- config.md
|
|
73
74
|
- visualization.md
|
|
75
|
+
- release.md
|
|
74
76
|
- Voice agents:
|
|
75
77
|
- voice/quickstart.md
|
|
76
78
|
- voice/pipeline.md
|
|
@@ -80,6 +82,7 @@ plugins:
|
|
|
80
82
|
- ref/index.md
|
|
81
83
|
- ref/agent.md
|
|
82
84
|
- ref/run.md
|
|
85
|
+
- ref/repl.md
|
|
83
86
|
- ref/tool.md
|
|
84
87
|
- ref/result.md
|
|
85
88
|
- ref/stream_events.md
|
|
@@ -139,6 +142,7 @@ plugins:
|
|
|
139
142
|
- running_agents.md
|
|
140
143
|
- results.md
|
|
141
144
|
- streaming.md
|
|
145
|
+
- repl.md
|
|
142
146
|
- tools.md
|
|
143
147
|
- mcp.md
|
|
144
148
|
- handoffs.md
|
|
@@ -1,19 +1,19 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "openai-agents"
|
|
3
|
-
version = "0.0.
|
|
3
|
+
version = "0.0.19"
|
|
4
4
|
description = "OpenAI Agents SDK"
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
requires-python = ">=3.9"
|
|
7
7
|
license = "MIT"
|
|
8
8
|
authors = [{ name = "OpenAI", email = "support@openai.com" }]
|
|
9
9
|
dependencies = [
|
|
10
|
-
"openai>=1.
|
|
10
|
+
"openai>=1.87.0",
|
|
11
11
|
"pydantic>=2.10, <3",
|
|
12
12
|
"griffe>=1.5.6, <2",
|
|
13
13
|
"typing-extensions>=4.12.2, <5",
|
|
14
14
|
"requests>=2.0, <3",
|
|
15
15
|
"types-requests>=2.0, <3",
|
|
16
|
-
"mcp>=1.
|
|
16
|
+
"mcp>=1.9.4, <2; python_version >= '3.10'",
|
|
17
17
|
]
|
|
18
18
|
classifiers = [
|
|
19
19
|
"Typing :: Typed",
|
|
@@ -45,6 +45,8 @@ from .models.interface import Model, ModelProvider, ModelTracing
|
|
|
45
45
|
from .models.openai_chatcompletions import OpenAIChatCompletionsModel
|
|
46
46
|
from .models.openai_provider import OpenAIProvider
|
|
47
47
|
from .models.openai_responses import OpenAIResponsesModel
|
|
48
|
+
from .prompts import DynamicPromptFunction, GenerateDynamicPromptData, Prompt
|
|
49
|
+
from .repl import run_demo_loop
|
|
48
50
|
from .result import RunResult, RunResultStreaming
|
|
49
51
|
from .run import RunConfig, Runner
|
|
50
52
|
from .run_context import RunContextWrapper, TContext
|
|
@@ -102,6 +104,7 @@ from .tracing import (
|
|
|
102
104
|
handoff_span,
|
|
103
105
|
mcp_tools_span,
|
|
104
106
|
set_trace_processors,
|
|
107
|
+
set_trace_provider,
|
|
105
108
|
set_tracing_disabled,
|
|
106
109
|
set_tracing_export_api_key,
|
|
107
110
|
speech_group_span,
|
|
@@ -160,6 +163,7 @@ __all__ = [
|
|
|
160
163
|
"ToolsToFinalOutputFunction",
|
|
161
164
|
"ToolsToFinalOutputResult",
|
|
162
165
|
"Runner",
|
|
166
|
+
"run_demo_loop",
|
|
163
167
|
"Model",
|
|
164
168
|
"ModelProvider",
|
|
165
169
|
"ModelTracing",
|
|
@@ -176,6 +180,9 @@ __all__ = [
|
|
|
176
180
|
"AgentsException",
|
|
177
181
|
"InputGuardrailTripwireTriggered",
|
|
178
182
|
"OutputGuardrailTripwireTriggered",
|
|
183
|
+
"DynamicPromptFunction",
|
|
184
|
+
"GenerateDynamicPromptData",
|
|
185
|
+
"Prompt",
|
|
179
186
|
"MaxTurnsExceeded",
|
|
180
187
|
"ModelBehaviorError",
|
|
181
188
|
"UserError",
|
|
@@ -240,6 +247,7 @@ __all__ = [
|
|
|
240
247
|
"guardrail_span",
|
|
241
248
|
"handoff_span",
|
|
242
249
|
"set_trace_processors",
|
|
250
|
+
"set_trace_provider",
|
|
243
251
|
"set_tracing_disabled",
|
|
244
252
|
"speech_group_span",
|
|
245
253
|
"transcription_span",
|
|
@@ -75,6 +75,7 @@ from .tool import (
|
|
|
75
75
|
MCPToolApprovalRequest,
|
|
76
76
|
Tool,
|
|
77
77
|
)
|
|
78
|
+
from .tool_context import ToolContext
|
|
78
79
|
from .tracing import (
|
|
79
80
|
SpanError,
|
|
80
81
|
Trace,
|
|
@@ -543,23 +544,24 @@ class RunImpl:
|
|
|
543
544
|
func_tool: FunctionTool, tool_call: ResponseFunctionToolCall
|
|
544
545
|
) -> Any:
|
|
545
546
|
with function_span(func_tool.name) as span_fn:
|
|
547
|
+
tool_context = ToolContext.from_agent_context(context_wrapper, tool_call.call_id)
|
|
546
548
|
if config.trace_include_sensitive_data:
|
|
547
549
|
span_fn.span_data.input = tool_call.arguments
|
|
548
550
|
try:
|
|
549
551
|
_, _, result = await asyncio.gather(
|
|
550
|
-
hooks.on_tool_start(
|
|
552
|
+
hooks.on_tool_start(tool_context, agent, func_tool),
|
|
551
553
|
(
|
|
552
|
-
agent.hooks.on_tool_start(
|
|
554
|
+
agent.hooks.on_tool_start(tool_context, agent, func_tool)
|
|
553
555
|
if agent.hooks
|
|
554
556
|
else _coro.noop_coroutine()
|
|
555
557
|
),
|
|
556
|
-
func_tool.on_invoke_tool(
|
|
558
|
+
func_tool.on_invoke_tool(tool_context, tool_call.arguments),
|
|
557
559
|
)
|
|
558
560
|
|
|
559
561
|
await asyncio.gather(
|
|
560
|
-
hooks.on_tool_end(
|
|
562
|
+
hooks.on_tool_end(tool_context, agent, func_tool, result),
|
|
561
563
|
(
|
|
562
|
-
agent.hooks.on_tool_end(
|
|
564
|
+
agent.hooks.on_tool_end(tool_context, agent, func_tool, result)
|
|
563
565
|
if agent.hooks
|
|
564
566
|
else _coro.noop_coroutine()
|
|
565
567
|
),
|
|
@@ -7,6 +7,7 @@ from collections.abc import Awaitable
|
|
|
7
7
|
from dataclasses import dataclass, field
|
|
8
8
|
from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast
|
|
9
9
|
|
|
10
|
+
from openai.types.responses.response_prompt_param import ResponsePromptParam
|
|
10
11
|
from typing_extensions import NotRequired, TypeAlias, TypedDict
|
|
11
12
|
|
|
12
13
|
from .agent_output import AgentOutputSchemaBase
|
|
@@ -17,6 +18,7 @@ from .logger import logger
|
|
|
17
18
|
from .mcp import MCPUtil
|
|
18
19
|
from .model_settings import ModelSettings
|
|
19
20
|
from .models.interface import Model
|
|
21
|
+
from .prompts import DynamicPromptFunction, Prompt, PromptUtil
|
|
20
22
|
from .run_context import RunContextWrapper, TContext
|
|
21
23
|
from .tool import FunctionTool, FunctionToolResult, Tool, function_tool
|
|
22
24
|
from .util import _transforms
|
|
@@ -95,6 +97,12 @@ class Agent(Generic[TContext]):
|
|
|
95
97
|
return a string.
|
|
96
98
|
"""
|
|
97
99
|
|
|
100
|
+
prompt: Prompt | DynamicPromptFunction | None = None
|
|
101
|
+
"""A prompt object (or a function that returns a Prompt). Prompts allow you to dynamically
|
|
102
|
+
configure the instructions, tools and other config for an agent outside of your code. Only
|
|
103
|
+
usable with OpenAI models, using the Responses API.
|
|
104
|
+
"""
|
|
105
|
+
|
|
98
106
|
handoff_description: str | None = None
|
|
99
107
|
"""A description of the agent. This is used when the agent is used as a handoff, so that an
|
|
100
108
|
LLM knows what it does and when to invoke it.
|
|
@@ -242,6 +250,12 @@ class Agent(Generic[TContext]):
|
|
|
242
250
|
|
|
243
251
|
return None
|
|
244
252
|
|
|
253
|
+
async def get_prompt(
|
|
254
|
+
self, run_context: RunContextWrapper[TContext]
|
|
255
|
+
) -> ResponsePromptParam | None:
|
|
256
|
+
"""Get the prompt for the agent."""
|
|
257
|
+
return await PromptUtil.to_model_input(self.prompt, run_context, self)
|
|
258
|
+
|
|
245
259
|
async def get_mcp_tools(self) -> list[Tool]:
|
|
246
260
|
"""Fetches the available tools from the MCP servers."""
|
|
247
261
|
convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
|
|
@@ -71,6 +71,7 @@ class LitellmModel(Model):
|
|
|
71
71
|
handoffs: list[Handoff],
|
|
72
72
|
tracing: ModelTracing,
|
|
73
73
|
previous_response_id: str | None,
|
|
74
|
+
prompt: Any | None = None,
|
|
74
75
|
) -> ModelResponse:
|
|
75
76
|
with generation_span(
|
|
76
77
|
model=str(self.model),
|
|
@@ -88,6 +89,7 @@ class LitellmModel(Model):
|
|
|
88
89
|
span_generation,
|
|
89
90
|
tracing,
|
|
90
91
|
stream=False,
|
|
92
|
+
prompt=prompt,
|
|
91
93
|
)
|
|
92
94
|
|
|
93
95
|
assert isinstance(response.choices[0], litellm.types.utils.Choices)
|
|
@@ -153,8 +155,8 @@ class LitellmModel(Model):
|
|
|
153
155
|
output_schema: AgentOutputSchemaBase | None,
|
|
154
156
|
handoffs: list[Handoff],
|
|
155
157
|
tracing: ModelTracing,
|
|
156
|
-
*,
|
|
157
158
|
previous_response_id: str | None,
|
|
159
|
+
prompt: Any | None = None,
|
|
158
160
|
) -> AsyncIterator[TResponseStreamEvent]:
|
|
159
161
|
with generation_span(
|
|
160
162
|
model=str(self.model),
|
|
@@ -172,6 +174,7 @@ class LitellmModel(Model):
|
|
|
172
174
|
span_generation,
|
|
173
175
|
tracing,
|
|
174
176
|
stream=True,
|
|
177
|
+
prompt=prompt,
|
|
175
178
|
)
|
|
176
179
|
|
|
177
180
|
final_response: Response | None = None
|
|
@@ -202,6 +205,7 @@ class LitellmModel(Model):
|
|
|
202
205
|
span: Span[GenerationSpanData],
|
|
203
206
|
tracing: ModelTracing,
|
|
204
207
|
stream: Literal[True],
|
|
208
|
+
prompt: Any | None = None,
|
|
205
209
|
) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
|
|
206
210
|
|
|
207
211
|
@overload
|
|
@@ -216,6 +220,7 @@ class LitellmModel(Model):
|
|
|
216
220
|
span: Span[GenerationSpanData],
|
|
217
221
|
tracing: ModelTracing,
|
|
218
222
|
stream: Literal[False],
|
|
223
|
+
prompt: Any | None = None,
|
|
219
224
|
) -> litellm.types.utils.ModelResponse: ...
|
|
220
225
|
|
|
221
226
|
async def _fetch_response(
|
|
@@ -229,6 +234,7 @@ class LitellmModel(Model):
|
|
|
229
234
|
span: Span[GenerationSpanData],
|
|
230
235
|
tracing: ModelTracing,
|
|
231
236
|
stream: bool = False,
|
|
237
|
+
prompt: Any | None = None,
|
|
232
238
|
) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
|
|
233
239
|
converted_messages = Converter.items_to_messages(input)
|
|
234
240
|
|
|
@@ -284,6 +290,10 @@ class LitellmModel(Model):
|
|
|
284
290
|
if model_settings.extra_body and isinstance(model_settings.extra_body, dict):
|
|
285
291
|
extra_kwargs.update(model_settings.extra_body)
|
|
286
292
|
|
|
293
|
+
# Add kwargs from model_settings.extra_args, filtering out None values
|
|
294
|
+
if model_settings.extra_args:
|
|
295
|
+
extra_kwargs.update(model_settings.extra_args)
|
|
296
|
+
|
|
287
297
|
ret = await litellm.acompletion(
|
|
288
298
|
model=self.model,
|
|
289
299
|
messages=converted_messages,
|
|
@@ -13,6 +13,7 @@ from pydantic import BaseModel, Field, create_model
|
|
|
13
13
|
from .exceptions import UserError
|
|
14
14
|
from .run_context import RunContextWrapper
|
|
15
15
|
from .strict_schema import ensure_strict_json_schema
|
|
16
|
+
from .tool_context import ToolContext
|
|
16
17
|
|
|
17
18
|
|
|
18
19
|
@dataclass
|
|
@@ -222,7 +223,8 @@ def function_schema(
|
|
|
222
223
|
doc_info = None
|
|
223
224
|
param_descs = {}
|
|
224
225
|
|
|
225
|
-
|
|
226
|
+
# Ensure name_override takes precedence even if docstring info is disabled.
|
|
227
|
+
func_name = name_override or (doc_info.name if doc_info else func.__name__)
|
|
226
228
|
|
|
227
229
|
# 2. Inspect function signature and get type hints
|
|
228
230
|
sig = inspect.signature(func)
|
|
@@ -237,21 +239,21 @@ def function_schema(
|
|
|
237
239
|
ann = type_hints.get(first_name, first_param.annotation)
|
|
238
240
|
if ann != inspect._empty:
|
|
239
241
|
origin = get_origin(ann) or ann
|
|
240
|
-
if origin is RunContextWrapper:
|
|
242
|
+
if origin is RunContextWrapper or origin is ToolContext:
|
|
241
243
|
takes_context = True # Mark that the function takes context
|
|
242
244
|
else:
|
|
243
245
|
filtered_params.append((first_name, first_param))
|
|
244
246
|
else:
|
|
245
247
|
filtered_params.append((first_name, first_param))
|
|
246
248
|
|
|
247
|
-
# For parameters other than the first, raise error if any use RunContextWrapper.
|
|
249
|
+
# For parameters other than the first, raise error if any use RunContextWrapper or ToolContext.
|
|
248
250
|
for name, param in params[1:]:
|
|
249
251
|
ann = type_hints.get(name, param.annotation)
|
|
250
252
|
if ann != inspect._empty:
|
|
251
253
|
origin = get_origin(ann) or ann
|
|
252
|
-
if origin is RunContextWrapper:
|
|
254
|
+
if origin is RunContextWrapper or origin is ToolContext:
|
|
253
255
|
raise UserError(
|
|
254
|
-
f"RunContextWrapper param found at non-first position in function"
|
|
256
|
+
f"RunContextWrapper/ToolContext param found at non-first position in function"
|
|
255
257
|
f" {func.__name__}"
|
|
256
258
|
)
|
|
257
259
|
filtered_params.append((name, param))
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
import inspect
|
|
4
|
+
import json
|
|
4
5
|
from collections.abc import Awaitable
|
|
5
6
|
from dataclasses import dataclass
|
|
6
7
|
from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload
|
|
@@ -99,8 +100,7 @@ class Handoff(Generic[TContext]):
|
|
|
99
100
|
"""
|
|
100
101
|
|
|
101
102
|
def get_transfer_message(self, agent: Agent[Any]) -> str:
|
|
102
|
-
|
|
103
|
-
return base
|
|
103
|
+
return json.dumps({"assistant": agent.name})
|
|
104
104
|
|
|
105
105
|
@classmethod
|
|
106
106
|
def default_tool_name(cls, agent: Agent[Any]) -> str:
|
|
@@ -340,10 +340,10 @@ class MCPServerStreamableHttpParams(TypedDict):
|
|
|
340
340
|
headers: NotRequired[dict[str, str]]
|
|
341
341
|
"""The headers to send to the server."""
|
|
342
342
|
|
|
343
|
-
timeout: NotRequired[timedelta]
|
|
343
|
+
timeout: NotRequired[timedelta | float]
|
|
344
344
|
"""The timeout for the HTTP request. Defaults to 5 seconds."""
|
|
345
345
|
|
|
346
|
-
sse_read_timeout: NotRequired[timedelta]
|
|
346
|
+
sse_read_timeout: NotRequired[timedelta | float]
|
|
347
347
|
"""The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""
|
|
348
348
|
|
|
349
349
|
terminate_on_close: NotRequired[bool]
|
|
@@ -401,8 +401,8 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
|
|
|
401
401
|
return streamablehttp_client(
|
|
402
402
|
url=self.params["url"],
|
|
403
403
|
headers=self.params.get("headers", None),
|
|
404
|
-
timeout=self.params.get("timeout",
|
|
405
|
-
sse_read_timeout=self.params.get("sse_read_timeout",
|
|
404
|
+
timeout=self.params.get("timeout", 5),
|
|
405
|
+
sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
|
|
406
406
|
terminate_on_close=self.params.get("terminate_on_close", True),
|
|
407
407
|
)
|
|
408
408
|
|
|
@@ -73,6 +73,11 @@ class ModelSettings:
|
|
|
73
73
|
"""Additional headers to provide with the request.
|
|
74
74
|
Defaults to None if not provided."""
|
|
75
75
|
|
|
76
|
+
extra_args: dict[str, Any] | None = None
|
|
77
|
+
"""Arbitrary keyword arguments to pass to the model API call.
|
|
78
|
+
These will be passed directly to the underlying model provider's API.
|
|
79
|
+
Use with caution as not all models support all parameters."""
|
|
80
|
+
|
|
76
81
|
def resolve(self, override: ModelSettings | None) -> ModelSettings:
|
|
77
82
|
"""Produce a new ModelSettings by overlaying any non-None values from the
|
|
78
83
|
override on top of this instance."""
|
|
@@ -84,6 +89,16 @@ class ModelSettings:
|
|
|
84
89
|
for field in fields(self)
|
|
85
90
|
if getattr(override, field.name) is not None
|
|
86
91
|
}
|
|
92
|
+
|
|
93
|
+
# Handle extra_args merging specially - merge dictionaries instead of replacing
|
|
94
|
+
if self.extra_args is not None or override.extra_args is not None:
|
|
95
|
+
merged_args = {}
|
|
96
|
+
if self.extra_args:
|
|
97
|
+
merged_args.update(self.extra_args)
|
|
98
|
+
if override.extra_args:
|
|
99
|
+
merged_args.update(override.extra_args)
|
|
100
|
+
changes["extra_args"] = merged_args if merged_args else None
|
|
101
|
+
|
|
87
102
|
return replace(self, **changes)
|
|
88
103
|
|
|
89
104
|
def to_json_dict(self) -> dict[str, Any]:
|
|
@@ -5,6 +5,8 @@ import enum
|
|
|
5
5
|
from collections.abc import AsyncIterator
|
|
6
6
|
from typing import TYPE_CHECKING
|
|
7
7
|
|
|
8
|
+
from openai.types.responses.response_prompt_param import ResponsePromptParam
|
|
9
|
+
|
|
8
10
|
from ..agent_output import AgentOutputSchemaBase
|
|
9
11
|
from ..handoffs import Handoff
|
|
10
12
|
from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent
|
|
@@ -46,6 +48,7 @@ class Model(abc.ABC):
|
|
|
46
48
|
tracing: ModelTracing,
|
|
47
49
|
*,
|
|
48
50
|
previous_response_id: str | None,
|
|
51
|
+
prompt: ResponsePromptParam | None,
|
|
49
52
|
) -> ModelResponse:
|
|
50
53
|
"""Get a response from the model.
|
|
51
54
|
|
|
@@ -59,6 +62,7 @@ class Model(abc.ABC):
|
|
|
59
62
|
tracing: Tracing configuration.
|
|
60
63
|
previous_response_id: the ID of the previous response. Generally not used by the model,
|
|
61
64
|
except for the OpenAI Responses API.
|
|
65
|
+
prompt: The prompt config to use for the model.
|
|
62
66
|
|
|
63
67
|
Returns:
|
|
64
68
|
The full model response.
|
|
@@ -77,6 +81,7 @@ class Model(abc.ABC):
|
|
|
77
81
|
tracing: ModelTracing,
|
|
78
82
|
*,
|
|
79
83
|
previous_response_id: str | None,
|
|
84
|
+
prompt: ResponsePromptParam | None,
|
|
80
85
|
) -> AsyncIterator[TResponseStreamEvent]:
|
|
81
86
|
"""Stream a response from the model.
|
|
82
87
|
|
|
@@ -90,6 +95,7 @@ class Model(abc.ABC):
|
|
|
90
95
|
tracing: Tracing configuration.
|
|
91
96
|
previous_response_id: the ID of the previous response. Generally not used by the model,
|
|
92
97
|
except for the OpenAI Responses API.
|
|
98
|
+
prompt: The prompt config to use for the model.
|
|
93
99
|
|
|
94
100
|
Returns:
|
|
95
101
|
An iterator of response stream events, in OpenAI Responses format.
|