openai-agents 0.0.3__tar.gz → 0.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of openai-agents has been flagged as a potentially problematic release.
- openai_agents-0.0.4/.github/ISSUE_TEMPLATE/bug_report.md +28 -0
- openai_agents-0.0.4/.github/ISSUE_TEMPLATE/feature_request.md +16 -0
- openai_agents-0.0.4/.github/ISSUE_TEMPLATE/question.md +16 -0
- openai_agents-0.0.4/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +18 -0
- openai_agents-0.0.4/.github/workflows/issues.yml +23 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/PKG-INFO +7 -5
- {openai_agents-0.0.3 → openai_agents-0.0.4}/README.md +6 -4
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/agents.md +2 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/config.md +2 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/context.md +2 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/guardrails.md +2 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/index.md +2 -2
- openai_agents-0.0.4/docs/models.md +66 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/multi_agent.md +2 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/quickstart.md +3 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/results.md +1 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/running_agents.md +1 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/tracing.md +4 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/input_guardrails.py +1 -1
- openai_agents-0.0.4/examples/basic/hello_world_jupyter.py +11 -0
- openai_agents-0.0.4/examples/model_providers/README.md +19 -0
- openai_agents-0.0.4/examples/model_providers/custom_example_agent.py +51 -0
- openai_agents-0.0.4/examples/model_providers/custom_example_global.py +55 -0
- openai_agents-0.0.4/examples/model_providers/custom_example_provider.py +73 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/README.md +1 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/pyproject.toml +1 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/__init__.py +10 -4
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/_config.py +6 -3
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/_run_impl.py +1 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/guardrail.py +1 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/model_settings.py +20 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/openai_chatcompletions.py +27 -1
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/openai_provider.py +24 -11
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/openai_responses.py +4 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/result.py +0 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tool.py +0 -2
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/processors.py +1 -4
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_openai_chatcompletions_converter.py +35 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_openai_chatcompletions_stream.py +5 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/uv.lock +1 -1
- openai_agents-0.0.3/docs/models.md +0 -73
- {openai_agents-0.0.3 → openai_agents-0.0.4}/.github/workflows/docs.yml +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/.github/workflows/publish.yml +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/.github/workflows/tests.yml +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/.gitignore +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/.prettierrc +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/LICENSE +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/Makefile +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/assets/images/favicon-platform.svg +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/assets/images/orchestration.png +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/assets/logo.svg +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/handoffs.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/agent.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/agent_output.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/exceptions.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/extensions/handoff_filters.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/extensions/handoff_prompt.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/function_schema.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/guardrail.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/handoffs.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/index.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/items.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/lifecycle.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/model_settings.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/models/interface.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/models/openai_chatcompletions.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/models/openai_responses.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/result.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/run.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/run_context.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/stream_events.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tool.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/create.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/index.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/processor_interface.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/processors.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/scope.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/setup.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/span_data.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/spans.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/traces.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/tracing/util.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/ref/usage.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/streaming.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/stylesheets/extra.css +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/docs/tools.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/README.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/agents_as_tools.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/deterministic.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/llm_as_a_judge.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/output_guardrails.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/parallelization.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/agent_patterns/routing.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/basic/agent_lifecycle_example.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/basic/dynamic_system_prompt.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/basic/hello_world.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/basic/lifecycle_example.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/basic/stream_items.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/basic/stream_text.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/customer_service/main.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/handoffs/message_filter.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/handoffs/message_filter_streaming.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/agents/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/agents/planner_agent.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/agents/search_agent.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/agents/writer_agent.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/main.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/manager.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/printer.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/sample_outputs/product_recs.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/sample_outputs/product_recs.txt +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/sample_outputs/vacation.md +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/sample_outputs/vacation.txt +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/tools/computer_use.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/tools/file_search.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/examples/tools/web_search.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/mkdocs.yml +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/_debug.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/_utils.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/agent.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/agent_output.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/computer.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/exceptions.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/extensions/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/extensions/handoff_filters.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/extensions/handoff_prompt.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/function_schema.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/handoffs.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/items.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/lifecycle.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/logger.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/_openai_shared.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/fake_id.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/models/interface.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/run.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/run_context.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/stream_events.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/strict_schema.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/create.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/logger.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/processor_interface.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/scope.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/setup.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/span_data.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/spans.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/traces.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/tracing/util.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/usage.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/src/agents/version.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/__init__.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/conftest.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/fake_model.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_agent_config.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_agent_hooks.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_agent_runner.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_agent_runner_streamed.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_agent_tracing.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_computer_action.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_config.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_doc_parsing.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_extension_filters.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_function_schema.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_function_tool.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_function_tool_decorator.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_global_hooks.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_guardrails.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_handoff_tool.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_items_helpers.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_max_turns.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_openai_chatcompletions.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_openai_responses_converter.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_output_tool.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_responses.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_responses_tracing.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_result_cast.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_run_config.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_run_step_execution.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_run_step_processing.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_strict_schema.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_tool_converter.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_trace_processor.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_tracing.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_tracing_errors.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/test_tracing_errors_streamed.py +0 -0
- {openai_agents-0.0.3 → openai_agents-0.0.4}/tests/testing_processor.py +0 -0
openai_agents-0.0.4/.github/ISSUE_TEMPLATE/bug_report.md (new file)
@@ -0,0 +1,28 @@
+---
+name: Bug report
+about: Report a bug
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+### Please read this first
+
+- **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/)
+- **Have you searched for related issues?** Others may have faced similar issues.
+
+### Describe the bug
+A clear and concise description of what the bug is.
+
+### Debug information
+- Agents SDK version: (e.g. `v0.0.3`)
+- Python version (e.g. Python 3.10)
+
+### Repro steps
+
+Ideally provide a minimal python script that can be run to reproduce the bug.
+
+
+### Expected behavior
+A clear and concise description of what you expected to happen.
openai_agents-0.0.4/.github/ISSUE_TEMPLATE/feature_request.md (new file)
@@ -0,0 +1,16 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: ''
+
+---
+
+### Please read this first
+
+- **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/)
+- **Have you searched for related issues?** Others may have had similar requesrs
+
+### Describe the feature
+What is the feature you're requesting? How would it work? Please provide examples and details if possible.
openai_agents-0.0.4/.github/ISSUE_TEMPLATE/question.md (new file)
@@ -0,0 +1,16 @@
+---
+name: Question
+about: Questions about the SDK
+title: ''
+labels: question
+assignees: ''
+
+---
+
+### Please read this first
+
+- **Have you read the docs?**[Agents SDK docs](https://openai.github.io/openai-agents-python/)
+- **Have you searched for related issues?** Others may have had similar requesrs
+
+### Question
+Describe your question. Provide details if available.
openai_agents-0.0.4/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md (new file)
@@ -0,0 +1,18 @@
+### Summary
+
+<!-- Please give a short summary of the change and the problem this solves. -->
+
+### Test plan
+
+<!-- Please explain how this was tested -->
+
+### Issue number
+
+<!-- For example: "Closes #1234" -->
+
+### Checks
+
+- [ ] I've added new tests (if relevant)
+- [ ] I've added/updated the relevant documentation
+- [ ] I've run `make lint` and `make format`
+- [ ] I've made sure tests pass
openai_agents-0.0.4/.github/workflows/issues.yml (new file)
@@ -0,0 +1,23 @@
+name: Close inactive issues
+on:
+  schedule:
+    - cron: "30 1 * * *"
+
+jobs:
+  close-issues:
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v9
+        with:
+          days-before-issue-stale: 7
+          days-before-issue-close: 3
+          stale-issue-label: "stale"
+          stale-issue-message: "This issue is stale because it has been open for 7 days with no activity."
+          close-issue-message: "This issue was closed because it has been inactive for 3 days since being marked as stale."
+          days-before-pr-stale: -1
+          days-before-pr-close: -1
+          any-of-labels: 'question,needs-more-info'
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
{openai_agents-0.0.3 → openai_agents-0.0.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.3
+Version: 0.0.4
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -75,9 +75,11 @@ print(result.final_output)
 
 (_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
 
+(_For Jupyter notebook users, see [hello_world_jupyter.py](examples/basic/hello_world_jupyter.py)_)
+
 ## Handoffs example
 
-```
+```python
 from agents import Agent, Runner
 import asyncio
 
@@ -144,9 +146,9 @@ When you call `Runner.run()`, we run a loop until we get a final output.
 
 1. We call the LLM, using the model and settings on the agent, and the message history.
 2. The LLM returns a response, which may include tool calls.
-3. If the response has a final output (see below for
+3. If the response has a final output (see below for more on this), we return it and end the loop.
 4. If the response has a handoff, we set the agent to the new agent and go back to step 1.
-5. We process the tool calls (if any) and append the tool responses
+5. We process the tool calls (if any) and append the tool responses messages. Then we go to step 1.
 
 There is a `max_turns` parameter that you can use to limit the number of times the loop executes.
 
@@ -168,7 +170,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r
 
 ## Tracing
 
-The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk),
+The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
 
 ## Development (only needed if you need to edit the SDK/examples)
 
{openai_agents-0.0.3 → openai_agents-0.0.4}/README.md
@@ -47,9 +47,11 @@ print(result.final_output)
 
 (_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
 
+(_For Jupyter notebook users, see [hello_world_jupyter.py](examples/basic/hello_world_jupyter.py)_)
+
 ## Handoffs example
 
-```
+```python
 from agents import Agent, Runner
 import asyncio
 
@@ -116,9 +118,9 @@ When you call `Runner.run()`, we run a loop until we get a final output.
 
 1. We call the LLM, using the model and settings on the agent, and the message history.
 2. The LLM returns a response, which may include tool calls.
-3. If the response has a final output (see below for
+3. If the response has a final output (see below for more on this), we return it and end the loop.
 4. If the response has a handoff, we set the agent to the new agent and go back to step 1.
-5. We process the tool calls (if any) and append the tool responses
+5. We process the tool calls (if any) and append the tool responses messages. Then we go to step 1.
 
 There is a `max_turns` parameter that you can use to limit the number of times the loop executes.
 
@@ -140,7 +142,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r
 
 ## Tracing
 
-The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk),
+The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
 
 ## Development (only needed if you need to edit the SDK/examples)
 
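The completed loop description above mentions a `max_turns` cap on `Runner.run()`. A hedged sketch of using it, assuming `MaxTurnsExceeded` is importable from `agents.exceptions` as in this SDK version (the agent name, prompt, and turn limit are illustrative):

```python
import asyncio

from agents import Agent, Runner
from agents.exceptions import MaxTurnsExceeded

async def main():
    agent = Agent(name="Assistant", instructions="Answer concisely.")
    try:
        # max_turns caps how many times the run loop may execute.
        result = await Runner.run(agent, "Hello", max_turns=3)
        print(result.final_output)
    except MaxTurnsExceeded:
        print("Run ended: the agent loop exceeded max_turns.")

if __name__ == "__main__":
    asyncio.run(main())
```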
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/agents.md
@@ -13,6 +13,7 @@ The most common properties of an agent you'll configure are:
 ```python
 from agents import Agent, ModelSettings, function_tool
 
+@function_tool
 def get_weather(city: str) -> str:
     return f"The weather in {city} is sunny"
 
@@ -20,7 +21,7 @@ agent = Agent(
     name="Haiku agent",
     instructions="Always respond in haiku form",
     model="o3-mini",
-    tools=[
+    tools=[get_weather],
 )
 ```
 
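For reference, the corrected snippet assembles into a runnable script like the following sketch (the driver code and the Tokyo prompt are illustrative additions, not part of the diff):

```python
import asyncio

from agents import Agent, Runner, function_tool

@function_tool
def get_weather(city: str) -> str:
    return f"The weather in {city} is sunny"

agent = Agent(
    name="Haiku agent",
    instructions="Always respond in haiku form",
    model="o3-mini",
    tools=[get_weather],
)

async def main():
    # Illustrative prompt; the agent should call get_weather and answer in haiku form.
    result = await Runner.run(agent, "What's the weather in Tokyo?")
    print(result.final_output)

if __name__ == "__main__":
    asyncio.run(main())
```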
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/config.md
@@ -10,14 +10,14 @@ from agents import set_default_openai_key
 set_default_openai_key("sk-...")
 ```
 
-Alternatively, you can also configure an OpenAI client to be used. By default, the SDK creates an `AsyncOpenAI` instance, using the API key from the environment variable or the default key set above. You can
+Alternatively, you can also configure an OpenAI client to be used. By default, the SDK creates an `AsyncOpenAI` instance, using the API key from the environment variable or the default key set above. You can change this by using the [set_default_openai_client()][agents.set_default_openai_client] function.
 
 ```python
 from openai import AsyncOpenAI
 from agents import set_default_openai_client
 
 custom_client = AsyncOpenAI(base_url="...", api_key="...")
-set_default_openai_client(
+set_default_openai_client(custom_client)
 ```
 
 Finally, you can also customize the OpenAI API that is used. By default, we use the OpenAI Responses API. You can override this to use the Chat Completions API by using the [set_default_openai_api()][agents.set_default_openai_api] function.
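Taken together, the two fixes above imply a configuration pattern like this sketch (the base URL and key are placeholders; the `"chat_completions"` argument follows the `set_default_openai_api` usage shown elsewhere in this release):

```python
from openai import AsyncOpenAI

from agents import set_default_openai_api, set_default_openai_client

# Placeholder endpoint and key; any OpenAI-compatible server would work here.
custom_client = AsyncOpenAI(base_url="https://example.com/v1", api_key="sk-...")
set_default_openai_client(custom_client)

# Optionally switch the default API from Responses to Chat Completions.
set_default_openai_api("chat_completions")
```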
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/context.md
@@ -36,6 +36,7 @@ class UserInfo: # (1)!
     name: str
     uid: int
 
+@function_tool
 async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str: # (2)!
     return f"User {wrapper.context.name} is 47 years old"
 
@@ -44,7 +45,7 @@ async def main():
 
     agent = Agent[UserInfo]( # (4)!
         name="Assistant",
-        tools=[
+        tools=[fetch_user_age],
     )
 
     result = await Runner.run(
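Assembled into a runnable form, the fragments in this hunk would look roughly like the sketch below (the `UserInfo(name="John", uid=123)` driver values and the question are illustrative):

```python
import asyncio
from dataclasses import dataclass

from agents import Agent, Runner, RunContextWrapper, function_tool

@dataclass
class UserInfo:
    name: str
    uid: int

@function_tool
async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str:
    return f"User {wrapper.context.name} is 47 years old"

async def main():
    user_info = UserInfo(name="John", uid=123)  # illustrative values
    agent = Agent[UserInfo](name="Assistant", tools=[fetch_user_age])
    # The context object is passed to the run and is visible to tools via the wrapper.
    result = await Runner.run(agent, "What is the age of the user?", context=user_info)
    print(result.final_output)

if __name__ == "__main__":
    asyncio.run(main())
```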
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/guardrails.md
@@ -21,7 +21,7 @@ Input guardrails run in 3 steps:
 
 ## Output guardrails
 
-Output
+Output guardrails run in 3 steps:
 
 1. First, the guardrail receives the same input passed to the agent.
 2. Next, the guardrail function runs to produce a [`GuardrailFunctionOutput`][agents.guardrail.GuardrailFunctionOutput], which is then wrapped in an [`OutputGuardrailResult`][agents.guardrail.OutputGuardrailResult]
@@ -33,7 +33,7 @@ Output guardrailas run in 3 steps:
 
 ## Tripwires
 
-If the input or output fails the guardrail, the Guardrail can signal this with a tripwire. As soon as we see a
+If the input or output fails the guardrail, the Guardrail can signal this with a tripwire. As soon as we see a guardrail that has triggered the tripwires, we immediately raise a `{Input,Output}GuardrailTripwireTriggered` exception and halt the Agent execution.
 
 ## Implementing a guardrail
 
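The completed tripwire sentence implies a handling pattern like this hedged sketch (the exception names follow the doc text and are assumed importable from `agents.exceptions`; the agent and guardrail setup are elided):

```python
from agents import Agent, Runner
from agents.exceptions import (
    InputGuardrailTripwireTriggered,
    OutputGuardrailTripwireTriggered,
)

async def run_with_guardrails(agent: Agent, user_input: str):
    try:
        return await Runner.run(agent, user_input)
    except InputGuardrailTripwireTriggered:
        # An input guardrail tripwire fired; the run halted before the agent answered.
        return None
    except OutputGuardrailTripwireTriggered:
        # An output guardrail tripwire fired on the agent's final output.
        return None
```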
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/index.md
@@ -1,12 +1,12 @@
 # OpenAI Agents SDK
 
-The [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) enables you to build agentic AI apps in a lightweight, easy
+The [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) enables you to build agentic AI apps in a lightweight, easy-to-use package with very few abstractions. It's a production-ready upgrade of our previous experimentation for agents, [Swarm](https://github.com/openai/swarm/tree/main). The Agents SDK has a very small set of primitives:
 
 - **Agents**, which are LLMs equipped with instructions and tools
 - **Handoffs**, which allow agents to delegate to other agents for specific tasks
 - **Guardrails**, which enable the inputs to agents to be validated
 
-In combination with Python, these primitives are powerful enough to express complex relationships between tools and agents, and allow you to build real
+In combination with Python, these primitives are powerful enough to express complex relationships between tools and agents, and allow you to build real-world applications without a steep learning curve. In addition, the SDK comes with built-in **tracing** that lets you visualize and debug your agentic flows, as well as evaluate them and even fine-tune models for your application.
 
 ## Why use the Agents SDK
 
openai_agents-0.0.4/docs/models.md (new file)
@@ -0,0 +1,66 @@
+# Models
+
+The Agents SDK comes with out-of-the-box support for OpenAI models in two flavors:
+
+- **Recommended**: the [`OpenAIResponsesModel`][agents.models.openai_responses.OpenAIResponsesModel], which calls OpenAI APIs using the new [Responses API](https://platform.openai.com/docs/api-reference/responses).
+- The [`OpenAIChatCompletionsModel`][agents.models.openai_chatcompletions.OpenAIChatCompletionsModel], which calls OpenAI APIs using the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat).
+
+## Mixing and matching models
+
+Within a single workflow, you may want to use different models for each agent. For example, you could use a smaller, faster model for triage, while using a larger, more capable model for complex tasks. When configuring an [`Agent`][agents.Agent], you can select a specific model by either:
+
+1. Passing the name of an OpenAI model.
+2. Passing any model name + a [`ModelProvider`][agents.models.interface.ModelProvider] that can map that name to a Model instance.
+3. Directly providing a [`Model`][agents.models.interface.Model] implementation.
+
+!!!note
+
+    While our SDK supports both the [`OpenAIResponsesModel`][agents.models.openai_responses.OpenAIResponsesModel] and the [`OpenAIChatCompletionsModel`][agents.models.openai_chatcompletions.OpenAIChatCompletionsModel] shapes, we recommend using a single model shape for each workflow because the two shapes support a different set of features and tools. If your workflow requires mixing and matching model shapes, make sure that all the features you're using are available on both.
+
+```python
+from agents import Agent, Runner, AsyncOpenAI, OpenAIChatCompletionsModel
+import asyncio
+
+spanish_agent = Agent(
+    name="Spanish agent",
+    instructions="You only speak Spanish.",
+    model="o3-mini", # (1)!
+)
+
+english_agent = Agent(
+    name="English agent",
+    instructions="You only speak English",
+    model=OpenAIChatCompletionsModel( # (2)!
+        model="gpt-4o",
+        openai_client=AsyncOpenAI()
+    ),
+)
+
+triage_agent = Agent(
+    name="Triage agent",
+    instructions="Handoff to the appropriate agent based on the language of the request.",
+    handoffs=[spanish_agent, english_agent],
+    model="gpt-3.5-turbo",
+)
+
+async def main():
+    result = await Runner.run(triage_agent, input="Hola, ¿cómo estás?")
+    print(result.final_output)
+```
+
+1. Sets the name of an OpenAI model directly.
+2. Provides a [`Model`][agents.models.interface.Model] implementation.
+
+## Using other LLM providers
+
+You can use other LLM providers in 3 ways (examples [here](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/)):
+
+1. [`set_default_openai_client`][agents.set_default_openai_client] is useful in cases where you want to globally use an instance of `AsyncOpenAI` as the LLM client. This is for cases where the LLM provider has an OpenAI compatible API endpoint, and you can set the `base_url` and `api_key`. See a configurable example in [examples/model_providers/custom_example_global.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_global.py).
+2. [`ModelProvider`][agents.models.interface.ModelProvider] is at the `Runner.run` level. This lets you say "use a custom model provider for all agents in this run". See a configurable example in [examples/model_providers/custom_example_provider.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_provider.py).
+3. [`Agent.model`][agents.agent.Agent.model] lets you specify the model on a specific Agent instance. This enables you to mix and match different providers for different agents. See a configurable example in [examples/model_providers/custom_example_agent.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_agent.py).
+
+In cases where you do not have an API key from `platform.openai.com`, we recommend disabling tracing via `set_tracing_disabled()`, or setting up a [different tracing processor](tracing.md).
+
+!!! note
+
+    In these examples, we use the Chat Completions API/model, because most LLM providers don't yet support the Responses API. If your LLM provider does support it, we recommend using Responses.
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/multi_agent.md
@@ -27,11 +27,11 @@ This pattern is great when the task is open-ended and you want to rely on the in
 
 ## Orchestrating via code
 
-While orchestrating via LLM is powerful, orchestrating via
+While orchestrating via LLM is powerful, orchestrating via code makes tasks more deterministic and predictable, in terms of speed, cost and performance. Common patterns here are:
 
 - Using [structured outputs](https://platform.openai.com/docs/guides/structured-outputs) to generate well formed data that you can inspect with your code. For example, you might ask an agent to classify the task into a few categories, and then pick the next agent based on the category.
 - Chaining multiple agents by transforming the output of one into the input of the next. You can decompose a task like writing a blog post into a series of steps - do research, write an outline, write the blog post, critique it, and then improve it.
 - Running the agent that performs the task in a `while` loop with an agent that evaluates and provides feedback, until the evaluator says the output passes certain criteria.
 - Running multiple agents in parallel, e.g. via Python primitives like `asyncio.gather`. This is useful for speed when you have multiple tasks that don't depend on each other.
 
-We have a number of examples in [`examples/agent_patterns`](https://github.com/openai/openai-agents-python/examples/agent_patterns).
+We have a number of examples in [`examples/agent_patterns`](https://github.com/openai/openai-agents-python/tree/main/examples/agent_patterns).
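The `asyncio.gather` parallelization pattern named in this hunk would look roughly like the following sketch (the agent names and prompts are illustrative, not from the repo):

```python
import asyncio

from agents import Agent, Runner

async def main():
    spanish = Agent(name="Spanish", instructions="Translate the input to Spanish.")
    french = Agent(name="French", instructions="Translate the input to French.")
    # Independent runs execute concurrently, matching the parallelization bullet above.
    es, fr = await asyncio.gather(
        Runner.run(spanish, "Hello, world"),
        Runner.run(french, "Hello, world"),
    )
    print(es.final_output)
    print(fr.final_output)

if __name__ == "__main__":
    asyncio.run(main())
```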
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/quickstart.md
@@ -166,6 +166,9 @@ triage_agent = Agent(
 )
 
 async def main():
+    result = await Runner.run(triage_agent, "who was the first president of the united states?")
+    print(result.final_output)
+
     result = await Runner.run(triage_agent, "what is life")
     print(result.final_output)
 
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/results.md
@@ -32,7 +32,7 @@ The [`new_items`][agents.result.RunResultBase.new_items] property contains the n
 
 - [`MessageOutputItem`][agents.items.MessageOutputItem] indicates a message from the LLM. The raw item is the message generated.
 - [`HandoffCallItem`][agents.items.HandoffCallItem] indicates that the LLM called the handoff tool. The raw item is the tool call item from the LLM.
-- [`HandoffOutputItem`][agents.items.HandoffOutputItem] indicates that a handoff
+- [`HandoffOutputItem`][agents.items.HandoffOutputItem] indicates that a handoff occurred. The raw item is the tool response to the handoff tool call. You can also access the source/target agents from the item.
 - [`ToolCallItem`][agents.items.ToolCallItem] indicates that the LLM invoked a tool.
 - [`ToolCallOutputItem`][agents.items.ToolCallOutputItem] indicates that a tool was called. The raw item is the tool response. You can also access the tool output from the item.
 - [`ReasoningItem`][agents.items.ReasoningItem] indicates a reasoning item from the LLM. The raw item is the reasoning generated.
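A hedged sketch of walking `result.new_items` as the fixed bullet describes; the import path `agents.items` matches the reference identifiers in the bullets, and the `output`, `source_agent`, and `target_agent` attribute names are assumptions inferred from the doc text:

```python
from agents.items import (
    HandoffOutputItem,
    MessageOutputItem,
    ToolCallItem,
    ToolCallOutputItem,
)

def summarize_items(result) -> None:
    for item in result.new_items:
        if isinstance(item, MessageOutputItem):
            print("LLM message:", item.raw_item)
        elif isinstance(item, HandoffOutputItem):
            # Source/target agents are accessible on the item, per the fixed bullet.
            print("handoff:", item.source_agent.name, "->", item.target_agent.name)
        elif isinstance(item, ToolCallItem):
            print("tool call:", item.raw_item)
        elif isinstance(item, ToolCallOutputItem):
            print("tool output:", item.output)
```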
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/running_agents.md
@@ -78,7 +78,7 @@ async def main():
     # San Francisco
 
     # Second turn
-    new_input =
+    new_input = result.to_input_list() + [{"role": "user", "content": "What state is it in?"}]
     result = await Runner.run(agent, new_input)
     print(result.final_output)
     # California
{openai_agents-0.0.3 → openai_agents-0.0.4}/docs/tracing.md
@@ -16,7 +16,7 @@ The Agents SDK includes built-in tracing, collecting a comprehensive record of e
     - `trace_id`: A unique ID for the trace. Automatically generated if you don't pass one. Must have the format `trace_<32_alphanumeric>`.
     - `group_id`: Optional group ID, to link multiple traces from the same conversation. For example, you might use a chat thread ID.
     - `disabled`: If True, the trace will not be recorded.
-    - `metadata`:
+    - `metadata`: Optional metadata for the trace.
 - **Spans** represent operations that have a start and end time. Spans have:
     - `started_at` and `ended_at` timestamps.
     - `trace_id`, to represent the trace they belong to
@@ -50,7 +50,7 @@ async def main():
 
     with trace("Joke workflow"): # (1)!
         first_result = await Runner.run(agent, "Tell me a joke")
-        second_result = await Runner.run(agent, f"Rate this joke: {
+        second_result = await Runner.run(agent, f"Rate this joke: {first_result.final_output}")
         print(f"Joke: {first_result.final_output}")
         print(f"Rating: {second_result.final_output}")
 ```
@@ -93,3 +93,5 @@ External trace processors include:
 - [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk)
 - [Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents)
 - [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)
+- [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration))
+- [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent)
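The external destinations listed here plug in through a processor interface. A hedged sketch of a custom destination, assuming `TracingProcessor` and `add_trace_processor` are importable from `agents.tracing` in this version (the class name and print output are illustrative):

```python
from agents.tracing import TracingProcessor, add_trace_processor

class ConsoleProcessor(TracingProcessor):
    """Toy destination that prints trace lifecycle events."""

    def on_trace_start(self, trace) -> None:
        print("trace started:", trace.trace_id)

    def on_trace_end(self, trace) -> None:
        print("trace ended:", trace.trace_id)

    def on_span_start(self, span) -> None:
        pass

    def on_span_end(self, span) -> None:
        pass

    def shutdown(self) -> None:
        pass

    def force_flush(self) -> None:
        pass

# Register alongside the default processor so both destinations receive events.
add_trace_processor(ConsoleProcessor())
```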
openai_agents-0.0.4/examples/basic/hello_world_jupyter.py (new file)
@@ -0,0 +1,11 @@
+from agents import Agent, Runner
+
+agent = Agent(name="Assistant", instructions="You are a helpful assistant")
+
+# Intended for Jupyter notebooks where there's an existing event loop
+result = await Runner.run(agent, "Write a haiku about recursion in programming.")  # type: ignore[top-level-await]  # noqa: F704
+print(result.final_output)
+
+# Code within code loops,
+# Infinite mirrors reflect—
+# Logic folds on self.
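Outside a notebook there is no running event loop, so the top-level `await` above would fail; a minimal script-equivalent sketch drives the same example with the standard library's `asyncio.run`:

```python
import asyncio

from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="You are a helpful assistant")

# asyncio.run creates the event loop that Jupyter would otherwise provide.
result = asyncio.run(Runner.run(agent, "Write a haiku about recursion in programming."))
print(result.final_output)
```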
openai_agents-0.0.4/examples/model_providers/README.md (new file)
@@ -0,0 +1,19 @@
+# Custom LLM providers
+
+The examples in this directory demonstrate how you might use a non-OpenAI LLM provider. To run them, first set a base URL, API key and model.
+
+```bash
+export EXAMPLE_BASE_URL="..."
+export EXAMPLE_API_KEY="..."
+export EXAMPLE_MODEL_NAME="..."
+```
+
+Then run the examples, e.g.:
+
+```
+python examples/model_providers/custom_example_provider.py
+
+Loops within themselves,
+Function calls its own being,
+Depth without ending.
+```
openai_agents-0.0.4/examples/model_providers/custom_example_agent.py (new file)
@@ -0,0 +1,51 @@
+import asyncio
+import os
+
+from openai import AsyncOpenAI
+
+from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled
+
+BASE_URL = os.getenv("EXAMPLE_BASE_URL") or ""
+API_KEY = os.getenv("EXAMPLE_API_KEY") or ""
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or ""
+
+if not BASE_URL or not API_KEY or not MODEL_NAME:
+    raise ValueError(
+        "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
+    )
+
+"""This example uses a custom provider for a specific agent. Steps:
+1. Create a custom OpenAI client.
+2. Create a `Model` that uses the custom client.
+3. Set the `model` on the Agent.
+
+Note that in this example, we disable tracing under the assumption that you don't have an API key
+from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var
+or call set_tracing_export_api_key() to set a tracing specific key.
+"""
+client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)
+set_tracing_disabled(disabled=True)
+
+# An alternate approach that would also work:
+# PROVIDER = OpenAIProvider(openai_client=client)
+# agent = Agent(..., model="some-custom-model")
+# Runner.run(agent, ..., run_config=RunConfig(model_provider=PROVIDER))
+
+
+async def main():
+    # This agent will use the custom LLM provider
+    agent = Agent(
+        name="Assistant",
+        instructions="You only respond in haikus.",
+        model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),
+    )
+
+    result = await Runner.run(
+        agent,
+        "Tell me about recursion in programming.",
+    )
+    print(result.final_output)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
openai_agents-0.0.4/examples/model_providers/custom_example_global.py (new file)
@@ -0,0 +1,55 @@
+import asyncio
+import os
+
+from openai import AsyncOpenAI
+
+from agents import (
+    Agent,
+    Runner,
+    set_default_openai_api,
+    set_default_openai_client,
+    set_tracing_disabled,
+)
+
+BASE_URL = os.getenv("EXAMPLE_BASE_URL") or ""
+API_KEY = os.getenv("EXAMPLE_API_KEY") or ""
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or ""
+
+if not BASE_URL or not API_KEY or not MODEL_NAME:
+    raise ValueError(
+        "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
+    )
+
+
+"""This example uses a custom provider for all requests by default. We do three things:
+1. Create a custom client.
+2. Set it as the default OpenAI client, and don't use it for tracing.
+3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API.
+
+Note that in this example, we disable tracing under the assumption that you don't have an API key
+from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var
+or call set_tracing_export_api_key() to set a tracing specific key.
+"""
+
+client = AsyncOpenAI(
+    base_url=BASE_URL,
+    api_key=API_KEY,
+)
+set_default_openai_client(client=client, use_for_tracing=False)
+set_default_openai_api("chat_completions")
+set_tracing_disabled(disabled=True)
+
+
+async def main():
+    agent = Agent(
+        name="Assistant",
+        instructions="You only respond in haikus.",
+        model=MODEL_NAME,
+    )
+
+    result = await Runner.run(agent, "Tell me about recursion in programming.")
+    print(result.final_output)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
openai_agents-0.0.4/examples/model_providers/custom_example_provider.py (new file)
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+import asyncio
+import os
+
+from openai import AsyncOpenAI
+
+from agents import (
+    Agent,
+    Model,
+    ModelProvider,
+    OpenAIChatCompletionsModel,
+    RunConfig,
+    Runner,
+    set_tracing_disabled,
+)
+
+BASE_URL = os.getenv("EXAMPLE_BASE_URL") or ""
+API_KEY = os.getenv("EXAMPLE_API_KEY") or ""
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or ""
+
+if not BASE_URL or not API_KEY or not MODEL_NAME:
+    raise ValueError(
+        "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
+    )
+
+
+"""This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for
+others. Steps:
+1. Create a custom OpenAI client.
+2. Create a ModelProvider that uses the custom client.
+3. Use the ModelProvider in calls to Runner.run(), only when we want to use the custom LLM provider.
+
+Note that in this example, we disable tracing under the assumption that you don't have an API key
+from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var
+or call set_tracing_export_api_key() to set a tracing specific key.
+"""
+client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)
+set_tracing_disabled(disabled=True)
+
+
+class CustomModelProvider(ModelProvider):
+    def get_model(self, model_name: str | None) -> Model:
+        return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client)
+
+
+CUSTOM_MODEL_PROVIDER = CustomModelProvider()
+
+
+async def main():
+    agent = Agent(
+        name="Assistant",
+        instructions="You only respond in haikus.",
+    )
+
+    # This will use the custom model provider
+    result = await Runner.run(
+        agent,
+        "Tell me about recursion in programming.",
+        run_config=RunConfig(model_provider=CUSTOM_MODEL_PROVIDER),
+    )
+    print(result.final_output)
+
+    # If you uncomment this, it will use OpenAI directly, not the custom provider
+    # result = await Runner.run(
+    #     agent,
+    #     "Tell me about recursion in programming.",
+    # )
+    # print(result.final_output)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
{openai_agents-0.0.3 → openai_agents-0.0.4}/examples/research_bot/README.md
@@ -21,5 +21,5 @@ If you're building your own research bot, some ideas to add to this are:
 
 1. Retrieval: Add support for fetching relevant information from a vector store. You could use the File Search tool for this.
 2. Image and file upload: Allow users to attach PDFs or other files, as baseline context for the research.
-3. More planning and thinking: Models often produce better results given more time to think. Improve the planning process to come up with a better plan, and add an evaluation step so that the model can choose to improve
+3. More planning and thinking: Models often produce better results given more time to think. Improve the planning process to come up with a better plan, and add an evaluation step so that the model can choose to improve its results, search for more stuff, etc.
 4. Code execution: Allow running code, which is useful for data analysis.