openai-agents 0.0.5__tar.gz → 0.0.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of openai-agents might be problematic.
- {openai_agents-0.0.5 → openai_agents-0.0.6}/PKG-INFO +6 -1
- {openai_agents-0.0.5 → openai_agents-0.0.6}/README.md +2 -0
- openai_agents-0.0.6/docs/examples.md +36 -0
- openai_agents-0.0.6/docs/ref/voice/events.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/exceptions.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/input.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/model.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/models/openai_provider.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/models/openai_stt.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/models/openai_tts.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/pipeline.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/pipeline_config.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/result.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/utils.md +3 -0
- openai_agents-0.0.6/docs/ref/voice/workflow.md +3 -0
- openai_agents-0.0.6/docs/voice/pipeline.md +75 -0
- openai_agents-0.0.6/docs/voice/quickstart.md +189 -0
- openai_agents-0.0.6/docs/voice/tracing.md +14 -0
- openai_agents-0.0.6/examples/financial_research_agent/README.md +38 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/financials_agent.py +23 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/planner_agent.py +35 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/risk_agent.py +22 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/search_agent.py +18 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/verifier_agent.py +27 -0
- openai_agents-0.0.6/examples/financial_research_agent/agents/writer_agent.py +34 -0
- openai_agents-0.0.6/examples/financial_research_agent/main.py +17 -0
- openai_agents-0.0.6/examples/financial_research_agent/manager.py +135 -0
- openai_agents-0.0.6/examples/financial_research_agent/printer.py +46 -0
- openai_agents-0.0.6/examples/voice/static/README.md +26 -0
- openai_agents-0.0.6/examples/voice/static/main.py +83 -0
- openai_agents-0.0.6/examples/voice/static/util.py +68 -0
- openai_agents-0.0.6/examples/voice/streamed/README.md +25 -0
- openai_agents-0.0.6/examples/voice/streamed/__init__.py +0 -0
- openai_agents-0.0.6/examples/voice/streamed/agents.py +81 -0
- openai_agents-0.0.6/examples/voice/streamed/main.py +221 -0
- openai_agents-0.0.6/mkdocs.yml +143 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/pyproject.toml +21 -13
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/__init__.py +12 -0
- openai_agents-0.0.6/src/agents/extensions/__init__.py +0 -0
- openai_agents-0.0.6/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/models/openai_provider.py +13 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/__init__.py +12 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/create.py +121 -1
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/span_data.py +96 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/util.py +5 -0
- openai_agents-0.0.6/src/agents/util/__init__.py +0 -0
- openai_agents-0.0.6/src/agents/voice/__init__.py +51 -0
- openai_agents-0.0.6/src/agents/voice/events.py +47 -0
- openai_agents-0.0.6/src/agents/voice/exceptions.py +8 -0
- openai_agents-0.0.6/src/agents/voice/imports.py +11 -0
- openai_agents-0.0.6/src/agents/voice/input.py +88 -0
- openai_agents-0.0.6/src/agents/voice/model.py +193 -0
- openai_agents-0.0.6/src/agents/voice/models/__init__.py +0 -0
- openai_agents-0.0.6/src/agents/voice/models/openai_model_provider.py +97 -0
- openai_agents-0.0.6/src/agents/voice/models/openai_stt.py +457 -0
- openai_agents-0.0.6/src/agents/voice/models/openai_tts.py +54 -0
- openai_agents-0.0.6/src/agents/voice/pipeline.py +151 -0
- openai_agents-0.0.6/src/agents/voice/pipeline_config.py +46 -0
- openai_agents-0.0.6/src/agents/voice/result.py +287 -0
- openai_agents-0.0.6/src/agents/voice/utils.py +37 -0
- openai_agents-0.0.6/src/agents/voice/workflow.py +93 -0
- openai_agents-0.0.6/tests/__init__.py +0 -0
- openai_agents-0.0.6/tests/voice/__init__.py +0 -0
- openai_agents-0.0.6/tests/voice/conftest.py +14 -0
- openai_agents-0.0.6/tests/voice/fake_models.py +115 -0
- openai_agents-0.0.6/tests/voice/helpers.py +21 -0
- openai_agents-0.0.6/tests/voice/test_input.py +127 -0
- openai_agents-0.0.6/tests/voice/test_openai_stt.py +369 -0
- openai_agents-0.0.6/tests/voice/test_openai_tts.py +94 -0
- openai_agents-0.0.6/tests/voice/test_pipeline.py +179 -0
- openai_agents-0.0.6/tests/voice/test_workflow.py +188 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/uv.lock +523 -20
- openai_agents-0.0.5/mkdocs.yml +0 -121
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/model_provider.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/ISSUE_TEMPLATE/question.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/workflows/issues.yml +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.github/workflows/tests.yml +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.gitignore +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/.prettierrc +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/LICENSE +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/Makefile +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/agents.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/config.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/context.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/guardrails.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/handoffs.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/index.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/models.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/multi_agent.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/quickstart.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/index.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/items.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/result.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/run.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/results.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/running_agents.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/streaming.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/tools.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/docs/tracing.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/__init__.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/forcing_tool_use.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/input_guardrails.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/llm_as_a_judge.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/agent_lifecycle_example.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/hello_world_jupyter.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/basic/tools.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.5/examples/research_bot/agents → openai_agents-0.0.6/examples/financial_research_agent}/__init__.py +0 -0
- {openai_agents-0.0.5/src/agents/extensions → openai_agents-0.0.6/examples/financial_research_agent/agents}/__init__.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/handoffs/message_filter.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/handoffs/message_filter_streaming.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/model_providers/README.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/model_providers/custom_example_agent.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/model_providers/custom_example_global.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/model_providers/custom_example_provider.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/README.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.5/src/agents/models → openai_agents-0.0.6/examples/research_bot/agents}/__init__.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.5/src/agents/util → openai_agents-0.0.6/examples/voice}/__init__.py +0 -0
- {openai_agents-0.0.5/tests → openai_agents-0.0.6/examples/voice/static}/__init__.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/_config.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/_run_impl.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/agent.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/agent_output.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/computer.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/exceptions.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/function_schema.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/guardrail.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/handoffs.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/items.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/logger.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/model_settings.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/models/interface.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/models/openai_chatcompletions.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/models/openai_responses.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/result.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/run.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/stream_events.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tool.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/processor_interface.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/processors.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/scope.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/setup.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/spans.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/tracing/traces.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/usage.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/util/_coro.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/util/_error_tracing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/util/_json.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/util/_pretty_print.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/util/_transforms.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/util/_types.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/src/agents/version.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/README.md +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/conftest.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/fake_model.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_agent_config.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_agent_runner.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_agent_runner_streamed.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_agent_tracing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_computer_action.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_config.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_function_schema.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_function_tool.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_function_tool_decorator.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_handoff_tool.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_openai_chatcompletions.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_openai_chatcompletions_converter.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_openai_chatcompletions_stream.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_output_tool.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_pretty_print.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_responses.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_responses_tracing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_result_cast.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_run_config.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_run_step_execution.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_run_step_processing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_tool_use_behavior.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_tracing.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_tracing_errors.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/test_tracing_errors_streamed.py +0 -0
- {openai_agents-0.0.5 → openai_agents-0.0.6}/tests/testing_processor.py +0 -0
{openai_agents-0.0.5 → openai_agents-0.0.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.5
+Version: 0.0.6
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python

@@ -24,6 +24,9 @@ Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
 Requires-Dist: typing-extensions<5,>=4.12.2
+Provides-Extra: voice
+Requires-Dist: numpy<3,>=2.2.0; (python_version >= '3.10') and extra == 'voice'
+Requires-Dist: websockets<16,>=15.0; extra == 'voice'
 Description-Content-Type: text/markdown

 # OpenAI Agents SDK

@@ -58,6 +61,8 @@ source env/bin/activate
 pip install openai-agents
 ```

+For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
+
 ## Hello world example

 ```python
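One practical note on the new `voice` extra (our note, not part of the diff): in zsh and some other shells, the square brackets in `openai-agents[voice]` are treated as a glob pattern, so the package spec usually needs quoting, e.g. `pip install 'openai-agents[voice]'`.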
openai_agents-0.0.6/docs/examples.md (new file)

@@ -0,0 +1,36 @@
+# Examples
+
+Check out a variety of sample implementations of the SDK in the examples section of the [repo](https://github.com/openai/openai-agents-python/tree/main/examples). The examples are organized into several categories that demonstrate different patterns and capabilities.
+
+
+## Categories
+
+- **agent_patterns:**
+  Examples in this category illustrate common agent design patterns, such as
+
+    - Deterministic workflows
+    - Agents as tools
+    - Parallel agent execution
+
+- **basic:**
+  These examples showcase foundational capabilities of the SDK, such as
+
+    - Dynamic system prompts
+    - Streaming outputs
+    - Lifecycle events
+
+- **tool examples:**
+  Learn how to implement OAI hosted tools such as web search and file search,
+  and integrate them into your agents.
+
+- **model providers:**
+  Explore how to use non-OpenAI models with the SDK.
+
+- **handoffs:**
+  See practical examples of agent handoffs.
+
+- **customer_service** and **research_bot:**
+  Two more built-out examples that illustrate real-world applications
+
+    - **customer_service**: Example customer service system for an airline.
+    - **research_bot**: Simple deep research clone.
openai_agents-0.0.6/docs/voice/pipeline.md (new file)

@@ -0,0 +1,75 @@
+# Pipelines and workflows
+
+[`VoicePipeline`][agents.voice.pipeline.VoicePipeline] is a class that makes it easy to turn your agentic workflows into a voice app. You pass in a workflow to run, and the pipeline takes care of transcribing input audio, detecting when the audio ends, calling your workflow at the right time, and turning the workflow output back into audio.
+
+```mermaid
+graph LR
+    %% Input
+    A["🎤 Audio Input"]
+
+    %% Voice Pipeline
+    subgraph Voice_Pipeline [Voice Pipeline]
+        direction TB
+        B["Transcribe (speech-to-text)"]
+        C["Your Code"]:::highlight
+        D["Text-to-speech"]
+        B --> C --> D
+    end
+
+    %% Output
+    E["🎧 Audio Output"]
+
+    %% Flow
+    A --> Voice_Pipeline
+    Voice_Pipeline --> E
+
+    %% Custom styling
+    classDef highlight fill:#ffcc66,stroke:#333,stroke-width:1px,font-weight:700;
+
+```
+
+## Configuring a pipeline
+
+When you create a pipeline, you can set a few things:
+
+1. The [`workflow`][agents.voice.workflow.VoiceWorkflowBase], which is the code that runs each time new audio is transcribed.
+2. The [`speech-to-text`][agents.voice.model.STTModel] and [`text-to-speech`][agents.voice.model.TTSModel] models used.
+3. The [`config`][agents.voice.pipeline_config.VoicePipelineConfig], which lets you configure things like:
+    - A model provider, which can map model names to models
+    - Tracing, including whether to disable tracing, whether audio files are uploaded, the workflow name, trace IDs, etc.
+    - Settings on the TTS and STT models, like the prompt, language and data types used.
+
+## Running a pipeline
+
+You can run a pipeline via the [`run()`][agents.voice.pipeline.VoicePipeline.run] method, which lets you pass in audio input in two forms:
+
+1. [`AudioInput`][agents.voice.input.AudioInput] is used when you have a full audio transcript, and just want to produce a result for it. This is useful in cases where you don't need to detect when a speaker is done speaking; for example, when you have pre-recorded audio or in push-to-talk apps where it's clear when the user is done speaking.
+2. [`StreamedAudioInput`][agents.voice.input.StreamedAudioInput] is used when you might need to detect when a user is done speaking. It allows you to push audio chunks as they are detected, and the voice pipeline will automatically run the agent workflow at the right time, via a process called "activity detection".
+
+## Results
+
+The result of a voice pipeline run is a [`StreamedAudioResult`][agents.voice.result.StreamedAudioResult]. This is an object that lets you stream events as they occur. There are a few kinds of [`VoiceStreamEvent`][agents.voice.events.VoiceStreamEvent], including:
+
+1. [`VoiceStreamEventAudio`][agents.voice.events.VoiceStreamEventAudio], which contains a chunk of audio.
+2. [`VoiceStreamEventLifecycle`][agents.voice.events.VoiceStreamEventLifecycle], which informs you of lifecycle events like a turn starting or ending.
+3. [`VoiceStreamEventError`][agents.voice.events.VoiceStreamEventError], which is an error event.
+
+```python
+
+result = await pipeline.run(input)
+
+async for event in result.stream():
+    if event.type == "voice_stream_event_audio":
+        ...  # play audio
+    elif event.type == "voice_stream_event_lifecycle":
+        ...  # lifecycle
+    elif event.type == "voice_stream_event_error":
+        ...  # error
+    ...
+```
+
+## Best practices
+
+### Interruptions
+
+The Agents SDK currently does not have built-in interruption support for [`StreamedAudioInput`][agents.voice.input.StreamedAudioInput]. Instead, for every detected turn it will trigger a separate run of your workflow. If you want to handle interruptions inside your application, you can listen to the [`VoiceStreamEventLifecycle`][agents.voice.events.VoiceStreamEventLifecycle] events. `turn_started` indicates that a new turn was transcribed and processing is beginning. `turn_ended` triggers after all the audio for the respective turn has been dispatched. You could use these events to mute the speaker's microphone when the model starts a turn and unmute it after you have flushed all the related audio for that turn.
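To make the interruption guidance above concrete, here is a minimal illustrative sketch of ours (not part of the diff) of the mute/unmute pattern it describes. The `microphone` and `player` objects are placeholders for your own audio I/O, the top-level import path mirrors the quickstart below, and the lifecycle attribute name is an assumption rather than something verified against the 0.0.6 source.

```python
# Sketch only: mute the user's microphone while the agent's turn is playing,
# as suggested in the "Interruptions" section above.
from agents import StreamedAudioInput, VoicePipeline  # import path assumed


async def stream_with_mute(pipeline: VoicePipeline, audio_input: StreamedAudioInput, microphone, player) -> None:
    """Run the pipeline and toggle a placeholder `microphone` around each agent turn."""
    result = await pipeline.run(audio_input)

    async for event in result.stream():
        if event.type == "voice_stream_event_lifecycle":
            if event.event == "turn_started":  # lifecycle names from the doc; attribute name assumed
                microphone.mute()    # a new turn was transcribed; stop capturing user audio
            elif event.event == "turn_ended":
                microphone.unmute()  # all audio for this turn has been dispatched
        elif event.type == "voice_stream_event_audio":
            player.write(event.data)  # e.g. a sounddevice.OutputStream, as in the quickstart
```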
openai_agents-0.0.6/docs/voice/quickstart.md (new file)

@@ -0,0 +1,189 @@
+# Quickstart
+
+## Prerequisites
+
+Make sure you've followed the base [quickstart instructions](../quickstart.md) for the Agents SDK, and set up a virtual environment. Then, install the optional voice dependencies from the SDK:
+
+```bash
+pip install openai-agents[voice]
+```
+
+## Concepts
+
+The main concept to know about is a [`VoicePipeline`][agents.voice.pipeline.VoicePipeline], which is a 3-step process:
+
+1. Run a speech-to-text model to turn audio into text.
+2. Run your code, which is usually an agentic workflow, to produce a result.
+3. Run a text-to-speech model to turn the result text back into audio.
+
+```mermaid
+graph LR
+    %% Input
+    A["🎤 Audio Input"]
+
+    %% Voice Pipeline
+    subgraph Voice_Pipeline [Voice Pipeline]
+        direction TB
+        B["Transcribe (speech-to-text)"]
+        C["Your Code"]:::highlight
+        D["Text-to-speech"]
+        B --> C --> D
+    end
+
+    %% Output
+    E["🎧 Audio Output"]
+
+    %% Flow
+    A --> Voice_Pipeline
+    Voice_Pipeline --> E
+
+    %% Custom styling
+    classDef highlight fill:#ffcc66,stroke:#333,stroke-width:1px,font-weight:700;
+
+```
+
+## Agents
+
+First, let's set up some Agents. This should feel familiar to you if you've built any agents with this SDK. We'll have a couple of Agents, a handoff, and a tool.
+
+```python
+import asyncio
+import random
+
+from agents import (
+    Agent,
+    function_tool,
+)
+from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
+
+
+@function_tool
+def get_weather(city: str) -> str:
+    """Get the weather for a given city."""
+    print(f"[debug] get_weather called with city: {city}")
+    choices = ["sunny", "cloudy", "rainy", "snowy"]
+    return f"The weather in {city} is {random.choice(choices)}."
+
+
+spanish_agent = Agent(
+    name="Spanish",
+    handoff_description="A spanish speaking agent.",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. Speak in Spanish.",
+    ),
+    model="gpt-4o-mini",
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. If the user speaks in Spanish, handoff to the spanish agent.",
+    ),
+    model="gpt-4o-mini",
+    handoffs=[spanish_agent],
+    tools=[get_weather],
+)
+```
+
+## Voice pipeline
+
+We'll set up a simple voice pipeline, using [`SingleAgentVoiceWorkflow`][agents.voice.workflow.SingleAgentVoiceWorkflow] as the workflow.
+
+```python
+from agents import SingleAgentVoiceWorkflow, VoicePipeline
+pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))
+```
+
+## Run the pipeline
+
+```python
+import numpy as np
+import sounddevice as sd
+
+from agents import AudioInput
+
+# For simplicity, we'll just create 3 seconds of silence
+# In reality, you'd get microphone data
+buffer = np.zeros(24000 * 3, dtype=np.int16)
+audio_input = AudioInput(buffer=buffer)
+result = await pipeline.run(audio_input)
+
+# Create an audio player using `sounddevice`
+player = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)
+player.start()
+
+# Play the audio stream as it comes in
+async for event in result.stream():
+    if event.type == "voice_stream_event_audio":
+        player.write(event.data)
+
+```
+
+## Put it all together
+
+```python
+import asyncio
+import random
+
+import numpy as np
+import sounddevice as sd
+
+from agents import (
+    Agent,
+    AudioInput,
+    SingleAgentVoiceWorkflow,
+    VoicePipeline,
+    function_tool,
+    set_tracing_disabled,
+)
+from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
+
+
+@function_tool
+def get_weather(city: str) -> str:
+    """Get the weather for a given city."""
+    print(f"[debug] get_weather called with city: {city}")
+    choices = ["sunny", "cloudy", "rainy", "snowy"]
+    return f"The weather in {city} is {random.choice(choices)}."
+
+
+spanish_agent = Agent(
+    name="Spanish",
+    handoff_description="A spanish speaking agent.",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. Speak in Spanish.",
+    ),
+    model="gpt-4o-mini",
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions=prompt_with_handoff_instructions(
+        "You're speaking to a human, so be polite and concise. If the user speaks in Spanish, handoff to the spanish agent.",
+    ),
+    model="gpt-4o-mini",
+    handoffs=[spanish_agent],
+    tools=[get_weather],
+)
+
+
+async def main():
+    pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))
+    buffer = np.zeros(24000 * 3, dtype=np.int16)
+    audio_input = AudioInput(buffer=buffer)
+
+    result = await pipeline.run(audio_input)
+
+    # Create an audio player using `sounddevice`
+    player = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)
+    player.start()
+
+    # Play the audio stream as it comes in
+    async for event in result.stream():
+        if event.type == "voice_stream_event_audio":
+            player.write(event.data)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+If you run this example, the agent will speak to you! Check out the example in [examples/voice/static](https://github.com/openai/openai-agents-python/tree/main/examples/voice/static) to see a demo where you can speak to the agent yourself.
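The quickstart above feeds 3 seconds of silence into the pipeline. As a small illustration of ours (not part of the diff), here is one way to capture real microphone audio with `sounddevice`, which the example already uses; the helper name is ours, and the import path for `AudioInput` is taken from the quickstart.

```python
# Sketch only: record real microphone audio instead of the silent buffer.
# 24 kHz mono int16 matches the player settings in the quickstart above.
import numpy as np
import sounddevice as sd

from agents import AudioInput  # import path as in the quickstart

SAMPLE_RATE = 24000


def record_audio_input(seconds: float = 3.0) -> AudioInput:
    """Record `seconds` of microphone audio and wrap it for the pipeline."""
    frames = int(seconds * SAMPLE_RATE)
    recording = sd.rec(frames, samplerate=SAMPLE_RATE, channels=1, dtype=np.int16)
    sd.wait()  # block until the recording finishes
    return AudioInput(buffer=recording.flatten())
```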
openai_agents-0.0.6/docs/voice/tracing.md (new file)

@@ -0,0 +1,14 @@
+# Tracing
+
+Just like the way [agents are traced](../tracing.md), voice pipelines are also automatically traced.
+
+You can read the tracing doc above for basic tracing information, but you can additionally configure tracing of a pipeline via [`VoicePipelineConfig`][agents.voice.pipeline_config.VoicePipelineConfig].
+
+Key tracing-related fields are:
+
+- [`tracing_disabled`][agents.voice.pipeline_config.VoicePipelineConfig.tracing_disabled]: controls whether tracing is disabled. By default, tracing is enabled.
+- [`trace_include_sensitive_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_data]: controls whether traces include potentially sensitive data, like audio transcripts. This is specifically for the voice pipeline, and not for anything that goes on inside your Workflow.
+- [`trace_include_sensitive_audio_data`][agents.voice.pipeline_config.VoicePipelineConfig.trace_include_sensitive_audio_data]: controls whether traces include audio data.
+- [`workflow_name`][agents.voice.pipeline_config.VoicePipelineConfig.workflow_name]: The name of the trace workflow.
+- [`group_id`][agents.voice.pipeline_config.VoicePipelineConfig.group_id]: The `group_id` of the trace, which lets you link multiple traces.
+- [`trace_metadata`][agents.voice.pipeline_config.VoicePipelineConfig.trace_metadata]: Additional metadata to include with the trace.
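As an illustration of ours (not from the diff) of how these fields might be wired together, here is a minimal sketch of passing a `VoicePipelineConfig` to a `VoicePipeline`. The keyword names simply mirror the fields listed above; the `agents.voice` import path and the `config=` parameter are assumptions, not verified against the 0.0.6 source.

```python
# Sketch only: configure tracing for a voice pipeline using the fields above.
from agents import Agent, SingleAgentVoiceWorkflow, VoicePipeline
from agents.voice import VoicePipelineConfig  # import path assumed

agent = Agent(name="Assistant", instructions="Be polite and concise.")

config = VoicePipelineConfig(
    workflow_name="voice_quickstart",          # name shown on the trace
    group_id="conversation-1234",              # links the traces of one conversation
    trace_include_sensitive_data=False,        # keep transcripts out of traces
    trace_include_sensitive_audio_data=False,  # keep raw audio out of traces
    trace_metadata={"channel": "demo"},        # extra metadata attached to the trace
)

pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent), config=config)
```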
openai_agents-0.0.6/examples/financial_research_agent/README.md (new file)

@@ -0,0 +1,38 @@
+# Financial Research Agent Example
+
+This example shows how you might compose a richer financial research agent using the Agents SDK. The pattern is similar to the `research_bot` example, but with more specialized sub‑agents and a verification step.
+
+The flow is:
+
+1. **Planning**: A planner agent turns the end user’s request into a list of search terms relevant to financial analysis – recent news, earnings calls, corporate filings, industry commentary, etc.
+2. **Search**: A search agent uses the built‑in `WebSearchTool` to retrieve terse summaries for each search term. (You could also add `FileSearchTool` if you have indexed PDFs or 10‑Ks.)
+3. **Sub‑analysts**: Additional agents (e.g. a fundamentals analyst and a risk analyst) are exposed as tools so the writer can call them inline and incorporate their outputs.
+4. **Writing**: A senior writer agent brings together the search snippets and any sub‑analyst summaries into a long‑form markdown report plus a short executive summary.
+5. **Verification**: A final verifier agent audits the report for obvious inconsistencies or missing sourcing.
+
+You can run the example with:
+
+```bash
+python -m examples.financial_research_agent.main
+```
+
+and enter a query like:
+
+```
+Write up an analysis of Apple Inc.'s most recent quarter.
+```
+
+### Starter prompt
+
+The writer agent is seeded with instructions similar to:
+
+```
+You are a senior financial analyst. You will be provided with the original query
+and a set of raw search summaries. Your job is to synthesize these into a
+long‑form markdown report (at least several paragraphs) with a short executive
+summary. You also have access to tools like `fundamentals_analysis` and
+`risk_analysis` to get short specialist write‑ups if you want to incorporate them.
+Add a few follow‑up questions for further research.
+```
+
+You can tweak these prompts and sub‑agents to suit your own data sources and preferred report structure.
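The manager wiring itself (`manager.py`) is not shown in this section, so as a rough sketch of ours (not from the diff), here is one way the sub‑analysts could be exposed to the writer as tools, as step 3 above describes. The tool names come from the starter prompt; the use of `clone()` and `as_tool()` and the relative import paths are assumptions about how such wiring might look, not a copy of the actual manager.

```python
# Sketch only: expose the analyst agents as tools on the writer agent.
# Assumes this sits inside the examples/financial_research_agent package.
from .agents.financials_agent import financials_agent
from .agents.risk_agent import risk_agent
from .agents.writer_agent import writer_agent

writer_with_tools = writer_agent.clone(
    tools=[
        financials_agent.as_tool(
            tool_name="fundamentals_analysis",
            tool_description="Short write-up of the company's fundamentals.",
        ),
        risk_agent.as_tool(
            tool_name="risk_analysis",
            tool_description="Short write-up of potential red flags.",
        ),
    ],
)
```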
openai_agents-0.0.6/examples/financial_research_agent/agents/financials_agent.py (new file)

@@ -0,0 +1,23 @@
+from pydantic import BaseModel
+
+from agents import Agent
+
+# A sub‑agent focused on analyzing a company's fundamentals.
+FINANCIALS_PROMPT = (
+    "You are a financial analyst focused on company fundamentals such as revenue, "
+    "profit, margins and growth trajectory. Given a collection of web (and optional file) "
+    "search results about a company, write a concise analysis of its recent financial "
+    "performance. Pull out key metrics or quotes. Keep it under 2 paragraphs."
+)
+
+
+class AnalysisSummary(BaseModel):
+    summary: str
+    """Short text summary for this aspect of the analysis."""
+
+
+financials_agent = Agent(
+    name="FundamentalsAnalystAgent",
+    instructions=FINANCIALS_PROMPT,
+    output_type=AnalysisSummary,
+)
openai_agents-0.0.6/examples/financial_research_agent/agents/planner_agent.py (new file)

@@ -0,0 +1,35 @@
+from pydantic import BaseModel
+
+from agents import Agent
+
+# Generate a plan of searches to ground the financial analysis.
+# For a given financial question or company, we want to search for
+# recent news, official filings, analyst commentary, and other
+# relevant background.
+PROMPT = (
+    "You are a financial research planner. Given a request for financial analysis, "
+    "produce a set of web searches to gather the context needed. Aim for recent "
+    "headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. "
+    "Output between 5 and 15 search terms to query for."
+)
+
+
+class FinancialSearchItem(BaseModel):
+    reason: str
+    """Your reasoning for why this search is relevant."""
+
+    query: str
+    """The search term to feed into a web (or file) search."""
+
+
+class FinancialSearchPlan(BaseModel):
+    searches: list[FinancialSearchItem]
+    """A list of searches to perform."""
+
+
+planner_agent = Agent(
+    name="FinancialPlannerAgent",
+    instructions=PROMPT,
+    model="o3-mini",
+    output_type=FinancialSearchPlan,
+)
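For context (our sketch, not part of the diff), a caller such as the manager would run this planner through the SDK's `Runner` and read back the structured plan, roughly as below. The helper name, prompt string, and relative import are illustrative; the import assumes the sketch sits next to `planner_agent.py`.

```python
# Sketch only: run the planner and consume its structured output.
from agents import Runner

from .planner_agent import FinancialSearchPlan, planner_agent


async def plan_searches(query: str) -> FinancialSearchPlan:
    result = await Runner.run(planner_agent, f"Query: {query}")
    plan = result.final_output_as(FinancialSearchPlan)  # validated against the pydantic model
    for item in plan.searches:
        print(f"{item.query}  ({item.reason})")
    return plan
```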
openai_agents-0.0.6/examples/financial_research_agent/agents/risk_agent.py (new file)

@@ -0,0 +1,22 @@
+from pydantic import BaseModel
+
+from agents import Agent
+
+# A sub‑agent specializing in identifying risk factors or concerns.
+RISK_PROMPT = (
+    "You are a risk analyst looking for potential red flags in a company's outlook. "
+    "Given background research, produce a short analysis of risks such as competitive threats, "
+    "regulatory issues, supply chain problems, or slowing growth. Keep it under 2 paragraphs."
+)
+
+
+class AnalysisSummary(BaseModel):
+    summary: str
+    """Short text summary for this aspect of the analysis."""
+
+
+risk_agent = Agent(
+    name="RiskAnalystAgent",
+    instructions=RISK_PROMPT,
+    output_type=AnalysisSummary,
+)
openai_agents-0.0.6/examples/financial_research_agent/agents/search_agent.py (new file)

@@ -0,0 +1,18 @@
+from agents import Agent, WebSearchTool
+from agents.model_settings import ModelSettings
+
+# Given a search term, use web search to pull back a brief summary.
+# Summaries should be concise but capture the main financial points.
+INSTRUCTIONS = (
+    "You are a research assistant specializing in financial topics. "
+    "Given a search term, use web search to retrieve up‑to‑date context and "
+    "produce a short summary of at most 300 words. Focus on key numbers, events, "
+    "or quotes that will be useful to a financial analyst."
+)
+
+search_agent = Agent(
+    name="FinancialSearchAgent",
+    instructions=INSTRUCTIONS,
+    tools=[WebSearchTool()],
+    model_settings=ModelSettings(tool_choice="required"),
+)
openai_agents-0.0.6/examples/financial_research_agent/agents/verifier_agent.py (new file)

@@ -0,0 +1,27 @@
+from pydantic import BaseModel
+
+from agents import Agent
+
+# Agent to sanity‑check a synthesized report for consistency and recall.
+# This can be used to flag potential gaps or obvious mistakes.
+VERIFIER_PROMPT = (
+    "You are a meticulous auditor. You have been handed a financial analysis report. "
+    "Your job is to verify the report is internally consistent, clearly sourced, and makes "
+    "no unsupported claims. Point out any issues or uncertainties."
+)
+
+
+class VerificationResult(BaseModel):
+    verified: bool
+    """Whether the report seems coherent and plausible."""
+
+    issues: str
+    """If not verified, describe the main issues or concerns."""
+
+
+verifier_agent = Agent(
+    name="VerificationAgent",
+    instructions=VERIFIER_PROMPT,
+    model="gpt-4o",
+    output_type=VerificationResult,
+)
openai_agents-0.0.6/examples/financial_research_agent/agents/writer_agent.py (new file)

@@ -0,0 +1,34 @@
+from pydantic import BaseModel
+
+from agents import Agent
+
+# Writer agent brings together the raw search results and optionally calls out
+# to sub‑analyst tools for specialized commentary, then returns a cohesive markdown report.
+WRITER_PROMPT = (
+    "You are a senior financial analyst. You will be provided with the original query and "
+    "a set of raw search summaries. Your task is to synthesize these into a long‑form markdown "
+    "report (at least several paragraphs) including a short executive summary and follow‑up "
+    "questions. If needed, you can call the available analysis tools (e.g. fundamentals_analysis, "
+    "risk_analysis) to get short specialist write‑ups to incorporate."
+)
+
+
+class FinancialReportData(BaseModel):
+    short_summary: str
+    """A short 2‑3 sentence executive summary."""
+
+    markdown_report: str
+    """The full markdown report."""
+
+    follow_up_questions: list[str]
+    """Suggested follow‑up questions for further research."""
+
+
+# Note: We will attach handoffs to specialist analyst agents at runtime in the manager.
+# This shows how an agent can use handoffs to delegate to specialized subagents.
+writer_agent = Agent(
+    name="FinancialWriterAgent",
+    instructions=WRITER_PROMPT,
+    model="gpt-4.5-preview-2025-02-27",
+    output_type=FinancialReportData,
+)
openai_agents-0.0.6/examples/financial_research_agent/main.py (new file)

@@ -0,0 +1,17 @@
+import asyncio
+
+from .manager import FinancialResearchManager
+
+
+# Entrypoint for the financial research agent example.
+# Run this as `python -m examples.financial_research_agent.main` and enter a
+# financial research query, for example:
+# "Write up an analysis of Apple Inc.'s most recent quarter."
+async def main() -> None:
+    query = input("Enter a financial research query: ")
+    mgr = FinancialResearchManager()
+    await mgr.run(query)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())