openai-agents 0.0.4__tar.gz → 0.0.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of openai-agents as potentially problematic.
- openai_agents-0.0.6/.github/ISSUE_TEMPLATE/model_provider.md +26 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/workflows/tests.yml +2 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/Makefile +17 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/PKG-INFO +9 -4
- {openai_agents-0.0.4 → openai_agents-0.0.6}/README.md +4 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/agents.md +13 -0
- openai_agents-0.0.6/docs/examples.md +36 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/models.md +27 -0
- openai_agents-0.0.6/docs/ref/voice/events.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/exceptions.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/input.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/model.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/models/openai_provider.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/models/openai_stt.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/models/openai_tts.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/pipeline.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/pipeline_config.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/result.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/utils.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/workflow.md +3 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/tracing.md +9 -2
- openai_agents-0.0.6/docs/voice/pipeline.md +75 -0
- openai_agents-0.0.6/docs/voice/quickstart.md +189 -0
- openai_agents-0.0.6/docs/voice/tracing.md +14 -0
- openai_agents-0.0.6/examples/agent_patterns/forcing_tool_use.py +99 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/agent_lifecycle_example.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/hello_world_jupyter.py +1 -1
- openai_agents-0.0.6/examples/basic/tools.py +34 -0
- openai_agents-0.0.6/examples/financial_research_agent/README.md +38 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/financials_agent.py +23 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/planner_agent.py +35 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/risk_agent.py +22 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/search_agent.py +18 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/verifier_agent.py +27 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/writer_agent.py +34 -0
- openai_agents-0.0.6/examples/financial_research_agent/main.py +17 -0
- openai_agents-0.0.6/examples/financial_research_agent/manager.py +135 -0
- openai_agents-0.0.6/examples/financial_research_agent/printer.py +46 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/handoffs/message_filter.py +2 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/handoffs/message_filter_streaming.py +2 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/model_providers/custom_example_agent.py +9 -5
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/model_providers/custom_example_global.py +9 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/model_providers/custom_example_provider.py +10 -6
- openai_agents-0.0.6/examples/voice/static/README.md +26 -0
- openai_agents-0.0.6/examples/voice/static/__init__.py +0 -0
- openai_agents-0.0.6/examples/voice/static/main.py +83 -0
- openai_agents-0.0.6/examples/voice/static/util.py +68 -0
- openai_agents-0.0.6/examples/voice/streamed/README.md +25 -0
- openai_agents-0.0.6/examples/voice/streamed/__init__.py +0 -0
- openai_agents-0.0.6/examples/voice/streamed/agents.py +81 -0
- openai_agents-0.0.6/examples/voice/streamed/main.py +221 -0
- openai_agents-0.0.6/mkdocs.yml +143 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/pyproject.toml +26 -14
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/__init__.py +22 -5
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/_run_impl.py +101 -22
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/agent.py +55 -7
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/agent_output.py +4 -4
- openai_agents-0.0.6/src/agents/extensions/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/function_schema.py +4 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/guardrail.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/handoffs.py +4 -4
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/items.py +4 -2
- openai_agents-0.0.6/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/models/openai_chatcompletions.py +6 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/models/openai_provider.py +13 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/result.py +7 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/run.py +10 -10
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tool.py +34 -10
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/__init__.py +12 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/create.py +122 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/processors.py +2 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/scope.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/setup.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/span_data.py +98 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/spans.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/traces.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/util.py +5 -0
- openai_agents-0.0.6/src/agents/util/__init__.py +0 -0
- openai_agents-0.0.6/src/agents/util/_coro.py +2 -0
- openai_agents-0.0.6/src/agents/util/_error_tracing.py +16 -0
- openai_agents-0.0.6/src/agents/util/_json.py +31 -0
- openai_agents-0.0.6/src/agents/util/_pretty_print.py +56 -0
- openai_agents-0.0.6/src/agents/util/_transforms.py +11 -0
- openai_agents-0.0.6/src/agents/util/_types.py +7 -0
- openai_agents-0.0.6/src/agents/voice/__init__.py +51 -0
- openai_agents-0.0.6/src/agents/voice/events.py +47 -0
- openai_agents-0.0.6/src/agents/voice/exceptions.py +8 -0
- openai_agents-0.0.6/src/agents/voice/imports.py +11 -0
- openai_agents-0.0.6/src/agents/voice/input.py +88 -0
- openai_agents-0.0.6/src/agents/voice/model.py +193 -0
- openai_agents-0.0.6/src/agents/voice/models/__init__.py +0 -0
- openai_agents-0.0.6/src/agents/voice/models/openai_model_provider.py +97 -0
- openai_agents-0.0.6/src/agents/voice/models/openai_stt.py +457 -0
- openai_agents-0.0.6/src/agents/voice/models/openai_tts.py +54 -0
- openai_agents-0.0.6/src/agents/voice/pipeline.py +151 -0
- openai_agents-0.0.6/src/agents/voice/pipeline_config.py +46 -0
- openai_agents-0.0.6/src/agents/voice/result.py +287 -0
- openai_agents-0.0.6/src/agents/voice/utils.py +37 -0
- openai_agents-0.0.6/src/agents/voice/workflow.py +93 -0
- openai_agents-0.0.6/tests/README.md +25 -0
- openai_agents-0.0.6/tests/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_agent_runner.py +82 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_agent_tracing.py +114 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_function_tool.py +2 -2
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_function_tool_decorator.py +57 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_output_tool.py +4 -2
- openai_agents-0.0.6/tests/test_pretty_print.py +201 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_responses_tracing.py +32 -1
- openai_agents-0.0.6/tests/test_tool_use_behavior.py +194 -0
- openai_agents-0.0.6/tests/test_tracing_errors.py +605 -0
- openai_agents-0.0.6/tests/test_tracing_errors_streamed.py +777 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/testing_processor.py +35 -0
- openai_agents-0.0.6/tests/voice/__init__.py +0 -0
- openai_agents-0.0.6/tests/voice/conftest.py +14 -0
- openai_agents-0.0.6/tests/voice/fake_models.py +115 -0
- openai_agents-0.0.6/tests/voice/helpers.py +21 -0
- openai_agents-0.0.6/tests/voice/test_input.py +127 -0
- openai_agents-0.0.6/tests/voice/test_openai_stt.py +369 -0
- openai_agents-0.0.6/tests/voice/test_openai_tts.py +94 -0
- openai_agents-0.0.6/tests/voice/test_pipeline.py +179 -0
- openai_agents-0.0.6/tests/voice/test_workflow.py +188 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/uv.lock +627 -90
- openai_agents-0.0.4/mkdocs.yml +0 -121
- openai_agents-0.0.4/src/agents/_utils.py +0 -61
- openai_agents-0.0.4/tests/test_tracing_errors.py +0 -328
- openai_agents-0.0.4/tests/test_tracing_errors_streamed.py +0 -397
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/question.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/workflows/issues.yml +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.gitignore +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/.prettierrc +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/LICENSE +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/config.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/context.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/guardrails.md +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/handoffs.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/index.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/multi_agent.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/quickstart.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/index.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/items.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/result.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/run.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/results.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/running_agents.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/streaming.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/docs/tools.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/input_guardrails.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/llm_as_a_judge.py +1 -1
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.4/examples/research_bot/agents → openai_agents-0.0.6/examples/financial_research_agent}/__init__.py +0 -0
- {openai_agents-0.0.4/src/agents/extensions → openai_agents-0.0.6/examples/financial_research_agent/agents}/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/model_providers/README.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/README.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.4/src/agents/models → openai_agents-0.0.6/examples/research_bot/agents}/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.4/tests → openai_agents-0.0.6/examples/voice}/__init__.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/_config.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/computer.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/exceptions.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/logger.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/model_settings.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/models/interface.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/models/openai_responses.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/stream_events.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/tracing/processor_interface.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/usage.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/src/agents/version.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/conftest.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/fake_model.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_agent_config.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_agent_runner_streamed.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_computer_action.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_config.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_function_schema.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_handoff_tool.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_openai_chatcompletions.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_openai_chatcompletions_converter.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_openai_chatcompletions_stream.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_responses.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_result_cast.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_run_config.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_run_step_execution.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_run_step_processing.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.4 → openai_agents-0.0.6}/tests/test_tracing.py +0 -0
openai_agents-0.0.6/.github/ISSUE_TEMPLATE/model_provider.md (new file):
@@ -0,0 +1,26 @@
+---
+name: Custom model providers
+about: Questions or bugs about using non-OpenAI models
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+### Please read this first
+
+- **Have you read the custom model provider docs, including the 'Common issues' section?** [Model provider docs](https://openai.github.io/openai-agents-python/models/#using-other-llm-providers)
+- **Have you searched for related issues?** Others may have faced similar issues.
+
+### Describe the question
+A clear and concise description of what the question or bug is.
+
+### Debug information
+- Agents SDK version: (e.g. `v0.0.3`)
+- Python version (e.g. Python 3.10)
+
+### Repro steps
+Ideally provide a minimal python script that can be run to reproduce the issue.
+
+### Expected behavior
+A clear and concise description of what you expected to happen.
{openai_agents-0.0.4 → openai_agents-0.0.6}/Makefile:
@@ -18,6 +18,21 @@ mypy:
 tests:
 	uv run pytest

+.PHONY: coverage
+coverage:
+
+	uv run coverage run -m pytest
+	uv run coverage xml -o coverage.xml
+	uv run coverage report -m --fail-under=95
+
+.PHONY: snapshots-fix
+snapshots-fix:
+	uv run pytest --inline-snapshot=fix
+
+.PHONY: snapshots-create
+snapshots-create:
+	uv run pytest --inline-snapshot=create
+
 .PHONY: old_version_tests
 old_version_tests:
 	UV_PROJECT_ENVIRONMENT=.venv_39 uv run --python 3.9 -m pytest
@@ -34,4 +49,6 @@ serve-docs:
 .PHONY: deploy-docs
 deploy-docs:
 	uv run mkdocs gh-deploy --force --verbose
+
+

{openai_agents-0.0.4 → openai_agents-0.0.6}/PKG-INFO:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.4
+Version: 0.0.6
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,11 +19,14 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: openai>=1.66.
+Requires-Dist: openai>=1.66.5
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
 Requires-Dist: typing-extensions<5,>=4.12.2
+Provides-Extra: voice
+Requires-Dist: numpy<3,>=2.2.0; (python_version >= '3.10') and extra == 'voice'
+Requires-Dist: websockets<16,>=15.0; extra == 'voice'
 Description-Content-Type: text/markdown

 # OpenAI Agents SDK
@@ -35,7 +38,7 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
 ### Core concepts:

 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
-2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/):
+2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/): A specialized tool call used by the Agents SDK for transferring control between agents
 3. [**Guardrails**](https://openai.github.io/openai-agents-python/guardrails/): Configurable safety checks for input and output validation
 4. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows

@@ -58,6 +61,8 @@ source env/bin/activate
 pip install openai-agents
 ```

+For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
+
 ## Hello world example

 ```python
@@ -170,7 +175,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r

 ## Tracing

-The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
+The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing), which also includes a larger list of [external tracing processors](http://openai.github.io/openai-agents-python/tracing/#external-tracing-processors-list).

 ## Development (only needed if you need to edit the SDK/examples)

{openai_agents-0.0.4 → openai_agents-0.0.6}/README.md:
@@ -7,7 +7,7 @@ The OpenAI Agents SDK is a lightweight yet powerful framework for building multi
 ### Core concepts:

 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
-2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/):
+2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/): A specialized tool call used by the Agents SDK for transferring control between agents
 3. [**Guardrails**](https://openai.github.io/openai-agents-python/guardrails/): Configurable safety checks for input and output validation
 4. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows

@@ -30,6 +30,8 @@ source env/bin/activate
 pip install openai-agents
 ```

+For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
+
 ## Hello world example

 ```python
@@ -142,7 +144,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r

 ## Tracing

-The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
+The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing), which also includes a larger list of [external tracing processors](http://openai.github.io/openai-agents-python/tracing/#external-tracing-processors-list).

 ## Development (only needed if you need to edit the SDK/examples)

{openai_agents-0.0.4 → openai_agents-0.0.6}/docs/agents.md:
@@ -130,3 +130,16 @@ robot_agent = pirate_agent.clone(
     instructions="Write like a robot",
 )
 ```
+
+## Forcing tool use
+
+Supplying a list of tools doesn't always mean the LLM will use a tool. You can force tool use by setting [`ModelSettings.tool_choice`][agents.model_settings.ModelSettings.tool_choice]. Valid values are:
+
+1. `auto`, which allows the LLM to decide whether or not to use a tool.
+2. `required`, which requires the LLM to use a tool (but it can intelligently decide which tool).
+3. `none`, which requires the LLM to _not_ use a tool.
+4. Setting a specific string e.g. `my_tool`, which requires the LLM to use that specific tool.
+
+!!! note
+
+    If requiring tool use, you should consider setting [`Agent.tool_use_behavior`] to stop the Agent from running when a tool output is produced. Otherwise, the Agent might run in an infinite loop, where the LLM produces a tool call, the tool result is sent to the LLM, and this loops forever because the LLM is always forced to use a tool.
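As a side note on the `docs/agents.md` addition above, here is a minimal sketch of forcing tool use together with a stop-on-first-tool behavior. It assumes the `ModelSettings.tool_choice` field and the `Agent.tool_use_behavior` option this release introduces; the `"stop_on_first_tool"` value mirrors the new `examples/agent_patterns/forcing_tool_use.py` listed in this diff, but treat the exact values as illustrative rather than authoritative.

```python
from agents import Agent, ModelSettings, Runner, function_tool


@function_tool
def get_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"The weather in {city} is sunny."


# tool_choice="required" forces a tool call on every model turn;
# tool_use_behavior="stop_on_first_tool" (assumed option name) ends the run as
# soon as the first tool result is produced, so the forced call cannot loop.
agent = Agent(
    name="Weather agent",
    instructions="You only answer weather questions.",
    tools=[get_weather],
    model_settings=ModelSettings(tool_choice="required"),
    tool_use_behavior="stop_on_first_tool",
)

result = Runner.run_sync(agent, "What's the weather in Tokyo?")
print(result.final_output)  # with this behavior, the tool's return value becomes the final output
```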
openai_agents-0.0.6/docs/examples.md (new file):
@@ -0,0 +1,36 @@
+# Examples
+
+Check out a variety of sample implementations of the SDK in the examples section of the [repo](https://github.com/openai/openai-agents-python/tree/main/examples). The examples are organized into several categories that demonstrate different patterns and capabilities.
+
+
+## Categories
+
+- **agent_patterns:**
+  Examples in this category illustrate common agent design patterns, such as
+
+    - Deterministic workflows
+    - Agents as tools
+    - Parallel agent execution
+
+- **basic:**
+  These examples showcase foundational capabilities of the SDK, such as
+
+    - Dynamic system prompts
+    - Streaming outputs
+    - Lifecycle events
+
+- **tool examples:**
+  Learn how to implement OAI hosted tools such as web search and file search,
+  and integrate them into your agents.
+
+- **model providers:**
+  Explore how to use non-OpenAI models with the SDK.
+
+- **handoffs:**
+  See practical examples of agent handoffs.
+
+- **customer_service** and **research_bot:**
+  Two more built-out examples that illustrate real-world applications
+
+    - **customer_service**: Example customer service system for an airline.
+    - **research_bot**: Simple deep research clone.
{openai_agents-0.0.4 → openai_agents-0.0.6}/docs/models.md:
@@ -64,3 +64,30 @@ In cases where you do not have an API key from `platform.openai.com`, we recomme
 !!! note

     In these examples, we use the Chat Completions API/model, because most LLM providers don't yet support the Responses API. If your LLM provider does support it, we recommend using Responses.
+
+## Common issues with using other LLM providers
+
+### Tracing client error 401
+
+If you get errors related to tracing, this is because traces are uploaded to OpenAI servers, and you don't have an OpenAI API key. You have three options to resolve this:
+
+1. Disable tracing entirely: [`set_tracing_disabled(True)`][agents.set_tracing_disabled].
+2. Set an OpenAI key for tracing: [`set_tracing_export_api_key(...)`][agents.set_tracing_export_api_key]. This API key will only be used for uploading traces, and must be from [platform.openai.com](https://platform.openai.com/).
+3. Use a non-OpenAI trace processor. See the [tracing docs](tracing.md#custom-tracing-processors).
+
+### Responses API support
+
+The SDK uses the Responses API by default, but most other LLM providers don't yet support it. You may see 404s or similar issues as a result. To resolve, you have two options:
+
+1. Call [`set_default_openai_api("chat_completions")`][agents.set_default_openai_api]. This works if you are setting `OPENAI_API_KEY` and `OPENAI_BASE_URL` via environment vars.
+2. Use [`OpenAIChatCompletionsModel`][agents.models.openai_chatcompletions.OpenAIChatCompletionsModel]. There are examples [here](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/).
+
+### Structured outputs support
+
+Some model providers don't have support for [structured outputs](https://platform.openai.com/docs/guides/structured-outputs). This sometimes results in an error that looks something like this:
+
+```
+BadRequestError: Error code: 400 - {'error': {'message': "'response_format.type' : value is not one of the allowed values ['text','json_object']", 'type': 'invalid_request_error'}}
+```
+
+This is a shortcoming of some model providers - they support JSON outputs, but don't allow you to specify the `json_schema` to use for the output. We are working on a fix for this, but we suggest relying on providers that do have support for JSON schema output, because otherwise your app will often break because of malformed JSON.
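Tying the subsections above together, here is a hedged sketch of pointing the SDK at a Chat Completions-style third-party endpoint while avoiding the tracing 401. The base URL, API key, and model name are placeholders, not real provider values.

```python
from openai import AsyncOpenAI

from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled

# Placeholder endpoint and key for a hypothetical non-OpenAI provider.
client = AsyncOpenAI(base_url="https://example.com/v1", api_key="YOUR_PROVIDER_KEY")

# Option 1 from "Tracing client error 401": drop tracing when no OpenAI key is available.
# (Alternatively, keep tracing and call set_tracing_export_api_key("sk-...").)
set_tracing_disabled(True)

agent = Agent(
    name="Assistant",
    instructions="Be concise.",
    # Option 2 from "Responses API support": use Chat Completions instead of Responses.
    model=OpenAIChatCompletionsModel(model="some-chat-model", openai_client=client),
)

result = Runner.run_sync(agent, "Say hello.")
print(result.final_output)
```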
{openai_agents-0.0.4 → openai_agents-0.0.6}/docs/tracing.md:
@@ -9,6 +9,8 @@ The Agents SDK includes built-in tracing, collecting a comprehensive record of e
 1. You can globally disable tracing by setting the env var `OPENAI_AGENTS_DISABLE_TRACING=1`
 2. You can disable tracing for a single run by setting [`agents.run.RunConfig.tracing_disabled`][] to `True`

+***For organizations operating under a Zero Data Retention (ZDR) policy using OpenAI's APIs, tracing is unavailable.***
+
 ## Traces and spans

 - **Traces** represent a single end-to-end operation of a "workflow". They're composed of Spans. Traces have the following properties:
@@ -88,10 +90,15 @@ To customize this default setup, to send traces to alternative or additional bac
 1. [`add_trace_processor()`][agents.tracing.add_trace_processor] lets you add an **additional** trace processor that will receive traces and spans as they are ready. This lets you do your own processing in addition to sending traces to OpenAI's backend.
 2. [`set_trace_processors()`][agents.tracing.set_trace_processors] lets you **replace** the default processors with your own trace processors. This means traces will not be sent to the OpenAI backend unless you include a `TracingProcessor` that does so.

-External
+## External tracing processors list

+- [Arize-Phoenix](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai-agents-sdk)
+- [MLflow](https://mlflow.org/docs/latest/tracing/integrations/openai-agent)
 - [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
 - [Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents)
 - [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)
-- [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration)
+- [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration)
 - [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent)
+- [LangSmith](https://docs.smith.langchain.com/observability/how_to_guides/trace_with_openai_agents_sdk)
+- [Maxim AI](https://www.getmaxim.ai/docs/observe/integrations/openai-agents-sdk)
+- [Comet Opik](https://www.comet.com/docs/opik/tracing/integrations/openai_agents)
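As a sketch of the custom-processor route mentioned in points 1 and 2 of that hunk, the snippet below registers an extra processor alongside the default OpenAI exporter. The `TracingProcessor` hook names are assumed from the interface's apparent shape and are not confirmed by this diff.

```python
from agents.tracing import TracingProcessor, add_trace_processor


class PrintingProcessor(TracingProcessor):
    """Toy processor that logs trace/span lifecycle events to stdout."""

    def on_trace_start(self, trace) -> None:  # hook names assumed
        print(f"trace started: {trace.name}")

    def on_trace_end(self, trace) -> None:
        print(f"trace finished: {trace.trace_id}")

    def on_span_start(self, span) -> None:
        pass

    def on_span_end(self, span) -> None:
        print(f"span finished: {type(span.span_data).__name__}")

    def shutdown(self) -> None:
        pass

    def force_flush(self) -> None:
        pass


# Runs in addition to the default exporter; use set_trace_processors([...])
# instead if you want to replace the OpenAI backend entirely.
add_trace_processor(PrintingProcessor())
```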
openai_agents-0.0.6/docs/voice/pipeline.md (new file):
@@ -0,0 +1,75 @@
+# Pipelines and workflows
+
+[`VoicePipeline`][agents.voice.pipeline.VoicePipeline] is a class that makes it easy to turn your agentic workflows into a voice app. You pass in a workflow to run, and the pipeline takes care of transcribing input audio, detecting when the audio ends, calling your workflow at the right time, and turning the workflow output back into audio.
+
+```mermaid
+graph LR
+    %% Input
+    A["🎤 Audio Input"]
+
+    %% Voice Pipeline
+    subgraph Voice_Pipeline [Voice Pipeline]
+        direction TB
+        B["Transcribe (speech-to-text)"]
+        C["Your Code"]:::highlight
+        D["Text-to-speech"]
+        B --> C --> D
+    end
+
+    %% Output
+    E["🎧 Audio Output"]
+
+    %% Flow
+    A --> Voice_Pipeline
+    Voice_Pipeline --> E
+
+    %% Custom styling
+    classDef highlight fill:#ffcc66,stroke:#333,stroke-width:1px,font-weight:700;
+
+```
+
+## Configuring a pipeline
+
+When you create a pipeline, you can set a few things:
+
+1. The [`workflow`][agents.voice.workflow.VoiceWorkflowBase], which is the code that runs each time new audio is transcribed.
+2. The [`speech-to-text`][agents.voice.model.STTModel] and [`text-to-speech`][agents.voice.model.TTSModel] models used
+3. The [`config`][agents.voice.pipeline_config.VoicePipelineConfig], which lets you configure things like:
+    - A model provider, which can map model names to models
+    - Tracing, including whether to disable tracing, whether audio files are uploaded, the workflow name, trace IDs etc.
+    - Settings on the TTS and STT models, like the prompt, language and data types used.
+
+## Running a pipeline
+
+You can run a pipeline via the [`run()`][agents.voice.pipeline.VoicePipeline.run] method, which lets you pass in audio input in two forms:
+
+1. [`AudioInput`][agents.voice.input.AudioInput] is used when you have a full audio transcript, and just want to produce a result for it. This is useful in cases where you don't need to detect when a speaker is done speaking; for example, when you have pre-recorded audio or in push-to-talk apps where it's clear when the user is done speaking.
+2. [`StreamedAudioInput`][agents.voice.input.StreamedAudioInput] is used when you might need to detect when a user is done speaking. It allows you to push audio chunks as they are detected, and the voice pipeline will automatically run the agent workflow at the right time, via a process called "activity detection".
+
+## Results
+
+The result of a voice pipeline run is a [`StreamedAudioResult`][agents.voice.result.StreamedAudioResult]. This is an object that lets you stream events as they occur. There are a few kinds of [`VoiceStreamEvent`][agents.voice.events.VoiceStreamEvent], including:
+
+1. [`VoiceStreamEventAudio`][agents.voice.events.VoiceStreamEventAudio], which contains a chunk of audio.
+2. [`VoiceStreamEventLifecycle`][agents.voice.events.VoiceStreamEventLifecycle], which informs you of lifecycle events like a turn starting or ending.
+3. [`VoiceStreamEventError`][agents.voice.events.VoiceStreamEventError], which is an error event.
+
+```python
+
+result = await pipeline.run(input)
+
+async for event in result.stream():
+    if event.type == "voice_stream_event_audio":
+        # play audio
+    elif event.type == "voice_stream_event_lifecycle":
+        # lifecycle
+    elif event.type == "voice_stream_event_error":
+        # error
+    ...
+```
+
+## Best practices
+
+### Interruptions
+
+The Agents SDK currently does not support any built-in interruptions support for [`StreamedAudioInput`][agents.voice.input.StreamedAudioInput]. Instead for every detected turn it will trigger a separate run of your workflow. If you want to handle interruptions inside your application you can listen to the [`VoiceStreamEventLifecycle`][agents.voice.events.VoiceStreamEventLifecycle] events. `turn_started` will indicate that a new turn was transcribed and processing is beginning. `turn_ended` will trigger after all the audio was dispatched for a respective turn. You could use these events to mute the microphone of the speaker when the model starts a turn and unmute it after you flushed all the related audio for a turn.
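To make the streaming pseudocode above concrete, here is a minimal sketch that runs a pipeline over a pre-recorded buffer and consumes the three event kinds. The top-level `agents` imports mirror the 0.0.6 voice quickstart later in this diff, and treating `event.data` as a numpy array follows that quickstart's `player.write(event.data)` usage.

```python
import asyncio

import numpy as np

from agents import Agent, AudioInput, SingleAgentVoiceWorkflow, VoicePipeline


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Answer briefly.")
    pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))

    # Push-to-talk style: the whole utterance is already in a buffer
    # (three seconds of silence here, standing in for microphone data).
    audio_input = AudioInput(buffer=np.zeros(24000 * 3, dtype=np.int16))
    result = await pipeline.run(audio_input)

    audio_out = b""
    async for event in result.stream():
        if event.type == "voice_stream_event_audio":
            audio_out += event.data.tobytes()  # collect the TTS audio chunks
        elif event.type == "voice_stream_event_lifecycle":
            print("lifecycle:", event)
        elif event.type == "voice_stream_event_error":
            print("error:", event)
    print(f"received {len(audio_out)} bytes of audio")


if __name__ == "__main__":
    asyncio.run(main())
```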
openai_agents-0.0.6/docs/voice/quickstart.md (new file):
@@ -0,0 +1,189 @@
+# Quickstart
+
+## Prerequisites
+
+Make sure you've followed the base [quickstart instructions](../quickstart.md) for the Agents SDK, and set up a virtual environment. Then, install the optional voice dependencies from the SDK:
+
+```bash
+pip install openai-agents[voice]
+```
+
+## Concepts
+
+The main concept to know about is a [`VoicePipeline`][agents.voice.pipeline.VoicePipeline], which is a 3 step process:
+
+1. Run a speech-to-text model to turn audio into text.
+2. Run your code, which is usually an agentic workflow, to produce a result.
+3. Run a text-to-speech model to turn the result text back into audio.
+
+```mermaid
+graph LR
+    %% Input
+    A["🎤 Audio Input"]
+
+    %% Voice Pipeline
+    subgraph Voice_Pipeline [Voice Pipeline]
+        direction TB
+        B["Transcribe (speech-to-text)"]
+        C["Your Code"]:::highlight
+        D["Text-to-speech"]
+        B --> C --> D
+    end
+
+    %% Output
+    E["🎧 Audio Output"]
+
+    %% Flow
+    A --> Voice_Pipeline
+    Voice_Pipeline --> E
+
+    %% Custom styling
+    classDef highlight fill:#ffcc66,stroke:#333,stroke-width:1px,font-weight:700;
+
+```
+
+## Agents
+
+First, let's set up some Agents. This should feel familiar to you if you've built any agents with this SDK. We'll have a couple of Agents, a handoff, and a tool.
+
+```python
+import asyncio
+import random
+
+from agents import (
+    Agent,
+    function_tool,
+)
+from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
+
+
+
+@function_tool
+def get_weather(city: str) -> str:
+    """Get the weather for a given city."""
+    print(f"[debug] get_weather called with city: {city}")
+    choices = ["sunny", "cloudy", "rainy", "snowy"]
+    return f"The weather in {city} is {random.choice(choices)}."
+
+
+spanish_agent = Agent(
+    name="Spanish",
+    handoff_description="A spanish speaking agent.",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. Speak in Spanish.",
+    ),
+    model="gpt-4o-mini",
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. If the user speaks in Spanish, handoff to the spanish agent.",
+    ),
+    model="gpt-4o-mini",
+    handoffs=[spanish_agent],
+    tools=[get_weather],
+)
+```
+
+## Voice pipeline
+
+We'll set up a simple voice pipeline, using [`SingleAgentVoiceWorkflow`][agents.voice.workflow.SingleAgentVoiceWorkflow] as the workflow.
+
+```python
+from agents import SingleAgentVoiceWorkflow, VoicePipeline
+pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))
+```
+
+## Run the pipeline
+
+```python
+import numpy as np
+import sounddevice as sd
+
+# For simplicity, we'll just create 3 seconds of silence
+# In reality, you'd get microphone data
+audio = np.zeros(24000 * 3, dtype=np.int16)
+result = await pipeline.run(audio_input)
+
+# Create an audio player using `sounddevice`
+player = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)
+player.start()
+
+# Play the audio stream as it comes in
+async for event in result.stream():
+    if event.type == "voice_stream_event_audio":
+        player.write(event.data)
+
+```
+
+## Put it all together
+
+```python
+import asyncio
+import random
+
+import numpy as np
+import sounddevice as sd
+
+from agents import (
+    Agent,
+    AudioInput,
+    SingleAgentVoiceWorkflow,
+    VoicePipeline,
+    function_tool,
+    set_tracing_disabled,
+)
+from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
+
+
+@function_tool
+def get_weather(city: str) -> str:
+    """Get the weather for a given city."""
+    print(f"[debug] get_weather called with city: {city}")
+    choices = ["sunny", "cloudy", "rainy", "snowy"]
+    return f"The weather in {city} is {random.choice(choices)}."
+
+
+spanish_agent = Agent(
+    name="Spanish",
+    handoff_description="A spanish speaking agent.",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. Speak in Spanish.",
+    ),
+    model="gpt-4o-mini",
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. If the user speaks in Spanish, handoff to the spanish agent.",
+    ),
+    model="gpt-4o-mini",
+    handoffs=[spanish_agent],
+    tools=[get_weather],
+)
+
+
+async def main():
+    pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))
+    buffer = np.zeros(24000 * 3, dtype=np.int16)
+    audio_input = AudioInput(buffer=buffer)
+
+    result = await pipeline.run(audio_input)
+
+    # Create an audio player using `sounddevice`
+    player = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)
+    player.start()
+
+    # Play the audio stream as it comes in
+    async for event in result.stream():
+        if event.type == "voice_stream_event_audio":
+            player.write(event.data)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+If you run this example, the agent will speak to you! Check out the example in [examples/voice/static](https://github.com/openai/openai-agents-python/tree/main/examples/voice/static) to see a demo where you can speak to the agent yourself.
openai_agents-0.0.6/docs/voice/tracing.md (new file):
@@ -0,0 +1,14 @@
+# Tracing
+
+Just like the way [agents are traced](../tracing.md), voice pipelines are also automatically traced.
+
+You can read the tracing doc above for basic tracing information, but you can additionally configure tracing of a pipeline via [`VoicePipelineConfig`][agents.voice.pipeline_config.VoicePipelineConfig].
+
+Key tracing related fields are:
+
+- [`tracing_disabled`][agents.voice.pipeline_config.VoicePipelineConfig.tracing_disabled]: controls whether tracing is disabled. By default, tracing is enabled.
+- [`trace_include_sensitive_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_data]: controls whether traces include potentially sensitive data, like audio transcripts. This is specifically for the voice pipeline, and not for anything that goes on inside your Workflow.
+- [`trace_include_sensitive_audio_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_audio_data]: controls whether traces include audio data.
+- [`workflow_name`][agents.voice.pipeline_config.VoicePipelineConfig.workflow_name]: The name of the trace workflow.
+- [`group_id`][agents.voice.pipeline_config.VoicePipelineConfig.group_id]: The `group_id` of the trace, which lets you link multiple traces.
+- [`trace_metadata`][agents.voice.pipeline_config.VoicePipelineConfig.trace_metadata]: Additional metadata to include with the trace.