openai-agents 0.0.12__tar.gz → 0.0.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic. Click here for more details.
- {openai_agents-0.0.12 → openai_agents-0.0.14}/PKG-INFO +3 -3
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/tracing.md +1 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/main.py +1 -1
- {openai_agents-0.0.12 → openai_agents-0.0.14}/pyproject.toml +3 -3
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/models/litellm_model.py +3 -4
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/mcp/server.py +22 -4
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/model_settings.py +21 -2
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/chatcmpl_stream_handler.py +4 -2
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/openai_chatcompletions.py +3 -6
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/openai_responses.py +1 -1
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/result.py +16 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/run.py +3 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/__init__.py +2 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/model.py +3 -4
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/fake_model.py +20 -4
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/test_server_errors.py +1 -1
- openai_agents-0.0.14/tests/model_settings/test_serialization.py +59 -0
- openai_agents-0.0.14/tests/models/test_litellm_chatcompletions_stream.py +286 -0
- openai_agents-0.0.14/tests/test_cancel_streaming.py +116 -0
- openai_agents-0.0.14/tests/test_extra_headers.py +92 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_result_cast.py +2 -1
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/conftest.py +0 -1
- {openai_agents-0.0.12 → openai_agents-0.0.14}/uv.lock +8 -11
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/ISSUE_TEMPLATE/question.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/workflows/issues.yml +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.github/workflows/tests.yml +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.gitignore +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.prettierrc +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/.vscode/settings.json +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/LICENSE +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/Makefile +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/agents.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/assets/images/graph.png +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/assets/images/mcp-tracing.jpg +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/config.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/context.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/examples.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/guardrails.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/handoffs.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/index.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/agents.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/config.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/context.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/examples.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/guardrails.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/handoffs.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/index.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/mcp.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/models/index.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/models/litellm.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/models.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/multi_agent.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/quickstart.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/results.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/running_agents.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/streaming.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/tools.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/tracing.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/visualization.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/voice/pipeline.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/voice/quickstart.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ja/voice/tracing.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/mcp.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/models/index.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/models/litellm.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/multi_agent.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/quickstart.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/extensions/litellm.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/index.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/items.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/mcp/server.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/mcp/util.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/result.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/run.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/events.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/exceptions.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/input.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/model.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/models/openai_provider.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/models/openai_stt.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/models/openai_tts.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/pipeline.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/pipeline_config.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/result.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/utils.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/ref/voice/workflow.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/results.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/running_agents.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/scripts/translate_docs.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/streaming.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/tools.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/visualization.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/voice/pipeline.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/voice/quickstart.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/docs/voice/tracing.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/forcing_tool_use.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/input_guardrails.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/llm_as_a_judge.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/agent_patterns/streaming_guardrails.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/agent_lifecycle_example.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/hello_world_jupyter.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/local_image.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/media/image_bison.jpg +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/non_strict_output_type.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/previous_response_id.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/remote_image.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/basic/tools.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/financials_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/planner_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/risk_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/search_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/verifier_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/agents/writer_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/manager.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/financial_research_agent/printer.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/handoffs/message_filter.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/handoffs/message_filter_streaming.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/filesystem_example/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/filesystem_example/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/filesystem_example/sample_files/favorite_books.txt +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/filesystem_example/sample_files/favorite_cities.txt +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/filesystem_example/sample_files/favorite_songs.txt +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/git_example/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/git_example/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/sse_example/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/sse_example/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/mcp/sse_example/server.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/model_providers/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/model_providers/custom_example_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/model_providers/custom_example_global.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/model_providers/custom_example_provider.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/model_providers/litellm_auto.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/model_providers/litellm_provider.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/agents/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/static/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/static/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/static/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/static/util.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/streamed/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/streamed/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/streamed/main.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/examples/voice/streamed/my_workflow.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/mkdocs.yml +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/_config.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/_run_impl.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/agent.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/agent_output.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/computer.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/exceptions.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/models/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/models/litellm_provider.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/extensions/visualization.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/function_schema.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/guardrail.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/handoffs.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/items.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/logger.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/mcp/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/mcp/util.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/chatcmpl_converter.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/chatcmpl_helpers.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/interface.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/multi_provider.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/models/openai_provider.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/py.typed +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/stream_events.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tool.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/create.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/processor_interface.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/processors.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/scope.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/setup.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/span_data.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/spans.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/traces.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/tracing/util.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/usage.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/_coro.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/_error_tracing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/_json.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/_pretty_print.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/_transforms.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/util/_types.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/version.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/events.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/exceptions.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/imports.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/input.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/models/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/models/openai_model_provider.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/models/openai_stt.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/models/openai_tts.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/pipeline.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/pipeline_config.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/result.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/utils.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/src/agents/voice/workflow.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/README.md +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/conftest.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/fastapi/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/fastapi/streaming_app.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/fastapi/test_streaming_context.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/conftest.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/helpers.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/test_caching.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/test_connect_disconnect.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/test_mcp_tracing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/test_mcp_util.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/mcp/test_runner_calls_mcp.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/models/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/models/conftest.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/models/test_map.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_agent_config.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_agent_runner.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_agent_runner_streamed.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_agent_tracing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_computer_action.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_config.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_function_schema.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_function_tool.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_function_tool_decorator.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_handoff_tool.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_openai_chatcompletions.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_openai_chatcompletions_converter.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_openai_chatcompletions_stream.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_output_tool.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_pretty_print.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_responses.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_responses_tracing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_run_config.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_run_step_execution.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_run_step_processing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_tool_choice_reset.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_tool_use_behavior.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_tracing.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_tracing_errors.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_tracing_errors_streamed.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/test_visualization.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/testing_processor.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/tracing/test_processor_api_key.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/__init__.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/fake_models.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/helpers.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/test_input.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/test_openai_stt.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/test_openai_tts.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/test_pipeline.py +0 -0
- {openai_agents-0.0.12 → openai_agents-0.0.14}/tests/voice/test_workflow.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: openai-agents
|
|
3
|
-
Version: 0.0.12
|
|
3
|
+
Version: 0.0.14
|
|
4
4
|
Summary: OpenAI Agents SDK
|
|
5
5
|
Project-URL: Homepage, https://github.com/openai/openai-agents-python
|
|
6
6
|
Project-URL: Repository, https://github.com/openai/openai-agents-python
|
|
@@ -20,13 +20,13 @@ Classifier: Typing :: Typed
|
|
|
20
20
|
Requires-Python: >=3.9
|
|
21
21
|
Requires-Dist: griffe<2,>=1.5.6
|
|
22
22
|
Requires-Dist: mcp<2,>=1.6.0; python_version >= '3.10'
|
|
23
|
-
Requires-Dist: openai>=1.
|
|
23
|
+
Requires-Dist: openai>=1.76.0
|
|
24
24
|
Requires-Dist: pydantic<3,>=2.10
|
|
25
25
|
Requires-Dist: requests<3,>=2.0
|
|
26
26
|
Requires-Dist: types-requests<3,>=2.0
|
|
27
27
|
Requires-Dist: typing-extensions<5,>=4.12.2
|
|
28
28
|
Provides-Extra: litellm
|
|
29
|
-
Requires-Dist: litellm<2,>=1.
|
|
29
|
+
Requires-Dist: litellm<2,>=1.67.4.post1; extra == 'litellm'
|
|
30
30
|
Provides-Extra: viz
|
|
31
31
|
Requires-Dist: graphviz>=0.17; extra == 'viz'
|
|
32
32
|
Provides-Extra: voice
|
|
@@ -101,6 +101,7 @@ To customize this default setup, to send traces to alternative or additional bac
|
|
|
101
101
|
|
|
102
102
|
- [Weights & Biases](https://weave-docs.wandb.ai/guides/integrations/openai_agents)
|
|
103
103
|
- [Arize-Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)
|
|
104
|
+
- [Future AGI](https://docs.futureagi.com/future-agi/products/observability/auto-instrumentation/openai_agents)
|
|
104
105
|
- [MLflow (self-hosted/OSS](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)
|
|
105
106
|
- [MLflow (Databricks hosted](https://docs.databricks.com/aws/en/mlflow/mlflow-tracing#-automatic-tracing)
|
|
106
107
|
- [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
|
|
@@ -4,7 +4,7 @@ from .manager import FinancialResearchManager
|
|
|
4
4
|
|
|
5
5
|
|
|
6
6
|
# Entrypoint for the financial bot example.
|
|
7
|
-
# Run this as `python -m examples.
|
|
7
|
+
# Run this as `python -m examples.financial_research_agent.main` and enter a
|
|
8
8
|
# financial research query, for example:
|
|
9
9
|
# "Write up an analysis of Apple Inc.'s most recent quarter."
|
|
10
10
|
async def main() -> None:
|
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "openai-agents"
|
|
3
|
-
version = "0.0.
|
|
3
|
+
version = "0.0.14"
|
|
4
4
|
description = "OpenAI Agents SDK"
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
requires-python = ">=3.9"
|
|
7
7
|
license = "MIT"
|
|
8
8
|
authors = [{ name = "OpenAI", email = "support@openai.com" }]
|
|
9
9
|
dependencies = [
|
|
10
|
-
"openai>=1.
|
|
10
|
+
"openai>=1.76.0",
|
|
11
11
|
"pydantic>=2.10, <3",
|
|
12
12
|
"griffe>=1.5.6, <2",
|
|
13
13
|
"typing-extensions>=4.12.2, <5",
|
|
@@ -36,7 +36,7 @@ Repository = "https://github.com/openai/openai-agents-python"
|
|
|
36
36
|
[project.optional-dependencies]
|
|
37
37
|
voice = ["numpy>=2.2.0, <3; python_version>='3.10'", "websockets>=15.0, <16"]
|
|
38
38
|
viz = ["graphviz>=0.17"]
|
|
39
|
-
litellm = ["litellm>=1.
|
|
39
|
+
litellm = ["litellm>=1.67.4.post1, <2"]
|
|
40
40
|
|
|
41
41
|
[dependency-groups]
|
|
42
42
|
dev = [
|
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
import dataclasses
|
|
4
3
|
import json
|
|
5
4
|
import time
|
|
6
5
|
from collections.abc import AsyncIterator
|
|
@@ -75,7 +74,7 @@ class LitellmModel(Model):
|
|
|
75
74
|
) -> ModelResponse:
|
|
76
75
|
with generation_span(
|
|
77
76
|
model=str(self.model),
|
|
78
|
-
model_config=
|
|
77
|
+
model_config=model_settings.to_json_dict()
|
|
79
78
|
| {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
|
|
80
79
|
disabled=tracing.is_disabled(),
|
|
81
80
|
) as span_generation:
|
|
@@ -147,7 +146,7 @@ class LitellmModel(Model):
|
|
|
147
146
|
) -> AsyncIterator[TResponseStreamEvent]:
|
|
148
147
|
with generation_span(
|
|
149
148
|
model=str(self.model),
|
|
150
|
-
model_config=
|
|
149
|
+
model_config=model_settings.to_json_dict()
|
|
151
150
|
| {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
|
|
152
151
|
disabled=tracing.is_disabled(),
|
|
153
152
|
) as span_generation:
|
|
@@ -286,7 +285,7 @@ class LitellmModel(Model):
|
|
|
286
285
|
stream=stream,
|
|
287
286
|
stream_options=stream_options,
|
|
288
287
|
reasoning_effort=reasoning_effort,
|
|
289
|
-
extra_headers=HEADERS,
|
|
288
|
+
extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
|
|
290
289
|
api_key=self.api_key,
|
|
291
290
|
base_url=self.base_url,
|
|
292
291
|
**extra_kwargs,
|
|
@@ -3,6 +3,7 @@ from __future__ import annotations
|
|
|
3
3
|
import abc
|
|
4
4
|
import asyncio
|
|
5
5
|
from contextlib import AbstractAsyncContextManager, AsyncExitStack
|
|
6
|
+
from datetime import timedelta
|
|
6
7
|
from pathlib import Path
|
|
7
8
|
from typing import Any, Literal
|
|
8
9
|
|
|
@@ -54,7 +55,7 @@ class MCPServer(abc.ABC):
|
|
|
54
55
|
class _MCPServerWithClientSession(MCPServer, abc.ABC):
|
|
55
56
|
"""Base class for MCP servers that use a `ClientSession` to communicate with the server."""
|
|
56
57
|
|
|
57
|
-
def __init__(self, cache_tools_list: bool):
|
|
58
|
+
def __init__(self, cache_tools_list: bool, client_session_timeout_seconds: float | None):
|
|
58
59
|
"""
|
|
59
60
|
Args:
|
|
60
61
|
cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
|
|
@@ -63,12 +64,16 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
|
|
|
63
64
|
by calling `invalidate_tools_cache()`. You should set this to `True` if you know the
|
|
64
65
|
server will not change its tools list, because it can drastically improve latency
|
|
65
66
|
(by avoiding a round-trip to the server every time).
|
|
67
|
+
|
|
68
|
+
client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
|
|
66
69
|
"""
|
|
67
70
|
self.session: ClientSession | None = None
|
|
68
71
|
self.exit_stack: AsyncExitStack = AsyncExitStack()
|
|
69
72
|
self._cleanup_lock: asyncio.Lock = asyncio.Lock()
|
|
70
73
|
self.cache_tools_list = cache_tools_list
|
|
71
74
|
|
|
75
|
+
self.client_session_timeout_seconds = client_session_timeout_seconds
|
|
76
|
+
|
|
72
77
|
# The cache is always dirty at startup, so that we fetch tools at least once
|
|
73
78
|
self._cache_dirty = True
|
|
74
79
|
self._tools_list: list[MCPTool] | None = None
|
|
@@ -101,7 +106,15 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
|
|
|
101
106
|
try:
|
|
102
107
|
transport = await self.exit_stack.enter_async_context(self.create_streams())
|
|
103
108
|
read, write = transport
|
|
104
|
-
session = await self.exit_stack.enter_async_context(
|
|
109
|
+
session = await self.exit_stack.enter_async_context(
|
|
110
|
+
ClientSession(
|
|
111
|
+
read,
|
|
112
|
+
write,
|
|
113
|
+
timedelta(seconds=self.client_session_timeout_seconds)
|
|
114
|
+
if self.client_session_timeout_seconds
|
|
115
|
+
else None,
|
|
116
|
+
)
|
|
117
|
+
)
|
|
105
118
|
await session.initialize()
|
|
106
119
|
self.session = session
|
|
107
120
|
except Exception as e:
|
|
@@ -183,6 +196,7 @@ class MCPServerStdio(_MCPServerWithClientSession):
|
|
|
183
196
|
params: MCPServerStdioParams,
|
|
184
197
|
cache_tools_list: bool = False,
|
|
185
198
|
name: str | None = None,
|
|
199
|
+
client_session_timeout_seconds: float | None = 5,
|
|
186
200
|
):
|
|
187
201
|
"""Create a new MCP server based on the stdio transport.
|
|
188
202
|
|
|
@@ -199,8 +213,9 @@ class MCPServerStdio(_MCPServerWithClientSession):
|
|
|
199
213
|
improve latency (by avoiding a round-trip to the server every time).
|
|
200
214
|
name: A readable name for the server. If not provided, we'll create one from the
|
|
201
215
|
command.
|
|
216
|
+
client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
|
|
202
217
|
"""
|
|
203
|
-
super().__init__(cache_tools_list)
|
|
218
|
+
super().__init__(cache_tools_list, client_session_timeout_seconds)
|
|
204
219
|
|
|
205
220
|
self.params = StdioServerParameters(
|
|
206
221
|
command=params["command"],
|
|
@@ -257,6 +272,7 @@ class MCPServerSse(_MCPServerWithClientSession):
|
|
|
257
272
|
params: MCPServerSseParams,
|
|
258
273
|
cache_tools_list: bool = False,
|
|
259
274
|
name: str | None = None,
|
|
275
|
+
client_session_timeout_seconds: float | None = 5,
|
|
260
276
|
):
|
|
261
277
|
"""Create a new MCP server based on the HTTP with SSE transport.
|
|
262
278
|
|
|
@@ -274,8 +290,10 @@ class MCPServerSse(_MCPServerWithClientSession):
|
|
|
274
290
|
|
|
275
291
|
name: A readable name for the server. If not provided, we'll create one from the
|
|
276
292
|
URL.
|
|
293
|
+
|
|
294
|
+
client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
|
|
277
295
|
"""
|
|
278
|
-
super().__init__(cache_tools_list)
|
|
296
|
+
super().__init__(cache_tools_list, client_session_timeout_seconds)
|
|
279
297
|
|
|
280
298
|
self.params = params
|
|
281
299
|
self._name = name or f"sse: {self.params['url']}"
|
|
@@ -1,10 +1,12 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
+
import dataclasses
|
|
3
4
|
from dataclasses import dataclass, fields, replace
|
|
4
|
-
from typing import Literal
|
|
5
|
+
from typing import Any, Literal
|
|
5
6
|
|
|
6
|
-
from openai._types import Body, Query
|
|
7
|
+
from openai._types import Body, Headers, Query
|
|
7
8
|
from openai.types.shared import Reasoning
|
|
9
|
+
from pydantic import BaseModel
|
|
8
10
|
|
|
9
11
|
|
|
10
12
|
@dataclass
|
|
@@ -67,6 +69,10 @@ class ModelSettings:
|
|
|
67
69
|
"""Additional body fields to provide with the request.
|
|
68
70
|
Defaults to None if not provided."""
|
|
69
71
|
|
|
72
|
+
extra_headers: Headers | None = None
|
|
73
|
+
"""Additional headers to provide with the request.
|
|
74
|
+
Defaults to None if not provided."""
|
|
75
|
+
|
|
70
76
|
def resolve(self, override: ModelSettings | None) -> ModelSettings:
|
|
71
77
|
"""Produce a new ModelSettings by overlaying any non-None values from the
|
|
72
78
|
override on top of this instance."""
|
|
@@ -79,3 +85,16 @@ class ModelSettings:
|
|
|
79
85
|
if getattr(override, field.name) is not None
|
|
80
86
|
}
|
|
81
87
|
return replace(self, **changes)
|
|
88
|
+
|
|
89
|
+
def to_json_dict(self) -> dict[str, Any]:
|
|
90
|
+
dataclass_dict = dataclasses.asdict(self)
|
|
91
|
+
|
|
92
|
+
json_dict: dict[str, Any] = {}
|
|
93
|
+
|
|
94
|
+
for field_name, value in dataclass_dict.items():
|
|
95
|
+
if isinstance(value, BaseModel):
|
|
96
|
+
json_dict[field_name] = value.model_dump(mode="json")
|
|
97
|
+
else:
|
|
98
|
+
json_dict[field_name] = value
|
|
99
|
+
|
|
100
|
+
return json_dict
|
|
@@ -56,7 +56,8 @@ class ChatCmplStreamHandler:
|
|
|
56
56
|
type="response.created",
|
|
57
57
|
)
|
|
58
58
|
|
|
59
|
-
|
|
59
|
+
# This is always set by the OpenAI API, but not by others e.g. LiteLLM
|
|
60
|
+
usage = chunk.usage if hasattr(chunk, "usage") else None
|
|
60
61
|
|
|
61
62
|
if not chunk.choices or not chunk.choices[0].delta:
|
|
62
63
|
continue
|
|
@@ -112,7 +113,8 @@ class ChatCmplStreamHandler:
|
|
|
112
113
|
state.text_content_index_and_output[1].text += delta.content
|
|
113
114
|
|
|
114
115
|
# Handle refusals (model declines to answer)
|
|
115
|
-
|
|
116
|
+
# This is always set by the OpenAI API, but not by others e.g. LiteLLM
|
|
117
|
+
if hasattr(delta, "refusal") and delta.refusal:
|
|
116
118
|
if not state.refusal_content_index_and_output:
|
|
117
119
|
# Initialize a content tracker for streaming refusal text
|
|
118
120
|
state.refusal_content_index_and_output = (
|
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
import dataclasses
|
|
4
3
|
import json
|
|
5
4
|
import time
|
|
6
5
|
from collections.abc import AsyncIterator
|
|
@@ -56,8 +55,7 @@ class OpenAIChatCompletionsModel(Model):
|
|
|
56
55
|
) -> ModelResponse:
|
|
57
56
|
with generation_span(
|
|
58
57
|
model=str(self.model),
|
|
59
|
-
model_config=
|
|
60
|
-
| {"base_url": str(self._client.base_url)},
|
|
58
|
+
model_config=model_settings.to_json_dict() | {"base_url": str(self._client.base_url)},
|
|
61
59
|
disabled=tracing.is_disabled(),
|
|
62
60
|
) as span_generation:
|
|
63
61
|
response = await self._fetch_response(
|
|
@@ -121,8 +119,7 @@ class OpenAIChatCompletionsModel(Model):
|
|
|
121
119
|
"""
|
|
122
120
|
with generation_span(
|
|
123
121
|
model=str(self.model),
|
|
124
|
-
model_config=
|
|
125
|
-
| {"base_url": str(self._client.base_url)},
|
|
122
|
+
model_config=model_settings.to_json_dict() | {"base_url": str(self._client.base_url)},
|
|
126
123
|
disabled=tracing.is_disabled(),
|
|
127
124
|
) as span_generation:
|
|
128
125
|
response, stream = await self._fetch_response(
|
|
@@ -255,7 +252,7 @@ class OpenAIChatCompletionsModel(Model):
|
|
|
255
252
|
stream_options=self._non_null_or_not_given(stream_options),
|
|
256
253
|
store=self._non_null_or_not_given(store),
|
|
257
254
|
reasoning_effort=self._non_null_or_not_given(reasoning_effort),
|
|
258
|
-
extra_headers=HEADERS,
|
|
255
|
+
extra_headers={ **HEADERS, **(model_settings.extra_headers or {}) },
|
|
259
256
|
extra_query=model_settings.extra_query,
|
|
260
257
|
extra_body=model_settings.extra_body,
|
|
261
258
|
metadata=self._non_null_or_not_given(model_settings.metadata),
|
|
@@ -253,7 +253,7 @@ class OpenAIResponsesModel(Model):
|
|
|
253
253
|
tool_choice=tool_choice,
|
|
254
254
|
parallel_tool_calls=parallel_tool_calls,
|
|
255
255
|
stream=stream,
|
|
256
|
-
extra_headers=_HEADERS,
|
|
256
|
+
extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
|
|
257
257
|
extra_query=model_settings.extra_query,
|
|
258
258
|
extra_body=model_settings.extra_body,
|
|
259
259
|
text=response_format,
|
|
@@ -15,6 +15,7 @@ from .exceptions import InputGuardrailTripwireTriggered, MaxTurnsExceeded
|
|
|
15
15
|
from .guardrail import InputGuardrailResult, OutputGuardrailResult
|
|
16
16
|
from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
|
|
17
17
|
from .logger import logger
|
|
18
|
+
from .run_context import RunContextWrapper
|
|
18
19
|
from .stream_events import StreamEvent
|
|
19
20
|
from .tracing import Trace
|
|
20
21
|
from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
|
|
@@ -50,6 +51,9 @@ class RunResultBase(abc.ABC):
|
|
|
50
51
|
output_guardrail_results: list[OutputGuardrailResult]
|
|
51
52
|
"""Guardrail results for the final output of the agent."""
|
|
52
53
|
|
|
54
|
+
context_wrapper: RunContextWrapper[Any]
|
|
55
|
+
"""The context wrapper for the agent run."""
|
|
56
|
+
|
|
53
57
|
@property
|
|
54
58
|
@abc.abstractmethod
|
|
55
59
|
def last_agent(self) -> Agent[Any]:
|
|
@@ -152,6 +156,18 @@ class RunResultStreaming(RunResultBase):
|
|
|
152
156
|
"""
|
|
153
157
|
return self.current_agent
|
|
154
158
|
|
|
159
|
+
def cancel(self) -> None:
|
|
160
|
+
"""Cancels the streaming run, stopping all background tasks and marking the run as
|
|
161
|
+
complete."""
|
|
162
|
+
self._cleanup_tasks() # Cancel all running tasks
|
|
163
|
+
self.is_complete = True # Mark the run as complete to stop event streaming
|
|
164
|
+
|
|
165
|
+
# Optionally, clear the event queue to prevent processing stale events
|
|
166
|
+
while not self._event_queue.empty():
|
|
167
|
+
self._event_queue.get_nowait()
|
|
168
|
+
while not self._input_guardrail_queue.empty():
|
|
169
|
+
self._input_guardrail_queue.get_nowait()
|
|
170
|
+
|
|
155
171
|
async def stream_events(self) -> AsyncIterator[StreamEvent]:
|
|
156
172
|
"""Stream deltas for new items as they are generated. We're using the types from the
|
|
157
173
|
OpenAI Responses API, so these are semantic events: each event has a `type` field that
|
|
@@ -270,6 +270,7 @@ class Runner:
|
|
|
270
270
|
_last_agent=current_agent,
|
|
271
271
|
input_guardrail_results=input_guardrail_results,
|
|
272
272
|
output_guardrail_results=output_guardrail_results,
|
|
273
|
+
context_wrapper=context_wrapper,
|
|
273
274
|
)
|
|
274
275
|
elif isinstance(turn_result.next_step, NextStepHandoff):
|
|
275
276
|
current_agent = cast(Agent[TContext], turn_result.next_step.new_agent)
|
|
@@ -423,6 +424,7 @@ class Runner:
|
|
|
423
424
|
output_guardrail_results=[],
|
|
424
425
|
_current_agent_output_schema=output_schema,
|
|
425
426
|
trace=new_trace,
|
|
427
|
+
context_wrapper=context_wrapper,
|
|
426
428
|
)
|
|
427
429
|
|
|
428
430
|
# Kick off the actual agent loop in the background and return the streamed result object.
|
|
@@ -696,6 +698,7 @@ class Runner:
|
|
|
696
698
|
usage=usage,
|
|
697
699
|
response_id=event.response.id,
|
|
698
700
|
)
|
|
701
|
+
context_wrapper.usage.add(usage)
|
|
699
702
|
|
|
700
703
|
streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
|
|
701
704
|
|
|
@@ -7,6 +7,7 @@ from .model import (
|
|
|
7
7
|
STTModelSettings,
|
|
8
8
|
TTSModel,
|
|
9
9
|
TTSModelSettings,
|
|
10
|
+
TTSVoice,
|
|
10
11
|
VoiceModelProvider,
|
|
11
12
|
)
|
|
12
13
|
from .models.openai_model_provider import OpenAIVoiceModelProvider
|
|
@@ -30,6 +31,7 @@ __all__ = [
|
|
|
30
31
|
"STTModelSettings",
|
|
31
32
|
"TTSModel",
|
|
32
33
|
"TTSModelSettings",
|
|
34
|
+
"TTSVoice",
|
|
33
35
|
"VoiceModelProvider",
|
|
34
36
|
"StreamedAudioResult",
|
|
35
37
|
"SingleAgentVoiceWorkflow",
|
|
@@ -14,14 +14,13 @@ DEFAULT_TTS_INSTRUCTIONS = (
|
|
|
14
14
|
)
|
|
15
15
|
DEFAULT_TTS_BUFFER_SIZE = 120
|
|
16
16
|
|
|
17
|
+
TTSVoice = Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]
|
|
18
|
+
"""Exportable type for the TTSModelSettings voice enum"""
|
|
17
19
|
|
|
18
20
|
@dataclass
|
|
19
21
|
class TTSModelSettings:
|
|
20
22
|
"""Settings for a TTS model."""
|
|
21
|
-
|
|
22
|
-
voice: (
|
|
23
|
-
Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"] | None
|
|
24
|
-
) = None
|
|
23
|
+
voice: TTSVoice | None = None
|
|
25
24
|
"""
|
|
26
25
|
The voice to use for the TTS model. If not provided, the default voice for the respective model
|
|
27
26
|
will be used.
|
|
@@ -3,7 +3,8 @@ from __future__ import annotations
|
|
|
3
3
|
from collections.abc import AsyncIterator
|
|
4
4
|
from typing import Any
|
|
5
5
|
|
|
6
|
-
from openai.types.responses import Response, ResponseCompletedEvent
|
|
6
|
+
from openai.types.responses import Response, ResponseCompletedEvent, ResponseUsage
|
|
7
|
+
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
|
|
7
8
|
|
|
8
9
|
from agents.agent_output import AgentOutputSchemaBase
|
|
9
10
|
from agents.handoffs import Handoff
|
|
@@ -33,6 +34,10 @@ class FakeModel(Model):
|
|
|
33
34
|
)
|
|
34
35
|
self.tracing_enabled = tracing_enabled
|
|
35
36
|
self.last_turn_args: dict[str, Any] = {}
|
|
37
|
+
self.hardcoded_usage: Usage | None = None
|
|
38
|
+
|
|
39
|
+
def set_hardcoded_usage(self, usage: Usage):
|
|
40
|
+
self.hardcoded_usage = usage
|
|
36
41
|
|
|
37
42
|
def set_next_output(self, output: list[TResponseOutputItem] | Exception):
|
|
38
43
|
self.turn_outputs.append(output)
|
|
@@ -83,7 +88,7 @@ class FakeModel(Model):
|
|
|
83
88
|
|
|
84
89
|
return ModelResponse(
|
|
85
90
|
output=output,
|
|
86
|
-
usage=Usage(),
|
|
91
|
+
usage=self.hardcoded_usage or Usage(),
|
|
87
92
|
response_id=None,
|
|
88
93
|
)
|
|
89
94
|
|
|
@@ -123,11 +128,15 @@ class FakeModel(Model):
|
|
|
123
128
|
|
|
124
129
|
yield ResponseCompletedEvent(
|
|
125
130
|
type="response.completed",
|
|
126
|
-
response=get_response_obj(output),
|
|
131
|
+
response=get_response_obj(output, usage=self.hardcoded_usage),
|
|
127
132
|
)
|
|
128
133
|
|
|
129
134
|
|
|
130
|
-
def get_response_obj(
|
|
135
|
+
def get_response_obj(
|
|
136
|
+
output: list[TResponseOutputItem],
|
|
137
|
+
response_id: str | None = None,
|
|
138
|
+
usage: Usage | None = None,
|
|
139
|
+
) -> Response:
|
|
131
140
|
return Response(
|
|
132
141
|
id=response_id or "123",
|
|
133
142
|
created_at=123,
|
|
@@ -138,4 +147,11 @@ def get_response_obj(output: list[TResponseOutputItem], response_id: str | None
|
|
|
138
147
|
tools=[],
|
|
139
148
|
top_p=None,
|
|
140
149
|
parallel_tool_calls=False,
|
|
150
|
+
usage=ResponseUsage(
|
|
151
|
+
input_tokens=usage.input_tokens if usage else 0,
|
|
152
|
+
output_tokens=usage.output_tokens if usage else 0,
|
|
153
|
+
total_tokens=usage.total_tokens if usage else 0,
|
|
154
|
+
input_tokens_details=InputTokensDetails(cached_tokens=0),
|
|
155
|
+
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
|
|
156
|
+
),
|
|
141
157
|
)
|
|
@@ -6,7 +6,7 @@ from agents.mcp.server import _MCPServerWithClientSession
|
|
|
6
6
|
|
|
7
7
|
class CrashingClientSessionServer(_MCPServerWithClientSession):
|
|
8
8
|
def __init__(self):
|
|
9
|
-
super().__init__(cache_tools_list=False)
|
|
9
|
+
super().__init__(cache_tools_list=False, client_session_timeout_seconds=5)
|
|
10
10
|
self.cleanup_called = False
|
|
11
11
|
|
|
12
12
|
def create_streams(self):
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from dataclasses import fields
|
|
3
|
+
|
|
4
|
+
from openai.types.shared import Reasoning
|
|
5
|
+
|
|
6
|
+
from agents.model_settings import ModelSettings
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def verify_serialization(model_settings: ModelSettings) -> None:
|
|
10
|
+
"""Verify that ModelSettings can be serialized to a JSON string."""
|
|
11
|
+
json_dict = model_settings.to_json_dict()
|
|
12
|
+
json_string = json.dumps(json_dict)
|
|
13
|
+
assert json_string is not None
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def test_basic_serialization() -> None:
|
|
17
|
+
"""Tests whether ModelSettings can be serialized to a JSON string."""
|
|
18
|
+
|
|
19
|
+
# First, lets create a ModelSettings instance
|
|
20
|
+
model_settings = ModelSettings(
|
|
21
|
+
temperature=0.5,
|
|
22
|
+
top_p=0.9,
|
|
23
|
+
max_tokens=100,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
# Now, lets serialize the ModelSettings instance to a JSON string
|
|
27
|
+
verify_serialization(model_settings)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def test_all_fields_serialization() -> None:
|
|
31
|
+
"""Tests whether ModelSettings can be serialized to a JSON string."""
|
|
32
|
+
|
|
33
|
+
# First, lets create a ModelSettings instance
|
|
34
|
+
model_settings = ModelSettings(
|
|
35
|
+
temperature=0.5,
|
|
36
|
+
top_p=0.9,
|
|
37
|
+
frequency_penalty=0.0,
|
|
38
|
+
presence_penalty=0.0,
|
|
39
|
+
tool_choice="auto",
|
|
40
|
+
parallel_tool_calls=True,
|
|
41
|
+
truncation="auto",
|
|
42
|
+
max_tokens=100,
|
|
43
|
+
reasoning=Reasoning(),
|
|
44
|
+
metadata={"foo": "bar"},
|
|
45
|
+
store=False,
|
|
46
|
+
include_usage=False,
|
|
47
|
+
extra_query={"foo": "bar"},
|
|
48
|
+
extra_body={"foo": "bar"},
|
|
49
|
+
extra_headers={"foo": "bar"},
|
|
50
|
+
)
|
|
51
|
+
|
|
52
|
+
# Verify that every single field is set to a non-None value
|
|
53
|
+
for field in fields(model_settings):
|
|
54
|
+
assert getattr(model_settings, field.name) is not None, (
|
|
55
|
+
f"You must set the {field.name} field"
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
# Now, lets serialize the ModelSettings instance to a JSON string
|
|
59
|
+
verify_serialization(model_settings)
|