openai_agents_python_providers-1.0.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_agents_python_providers-1.0.0/.env.example +11 -0
- openai_agents_python_providers-1.0.0/.github/workflows/ci.yml +40 -0
- openai_agents_python_providers-1.0.0/.github/workflows/release.yml +82 -0
- openai_agents_python_providers-1.0.0/.gitignore +36 -0
- openai_agents_python_providers-1.0.0/CHANGELOG.md +7 -0
- openai_agents_python_providers-1.0.0/LICENSE +21 -0
- openai_agents_python_providers-1.0.0/PKG-INFO +182 -0
- openai_agents_python_providers-1.0.0/README.md +152 -0
- openai_agents_python_providers-1.0.0/examples/README.md +76 -0
- openai_agents_python_providers-1.0.0/examples/standalone/llama_cpp_example.py +47 -0
- openai_agents_python_providers-1.0.0/examples/standalone/ollama_example.py +42 -0
- openai_agents_python_providers-1.0.0/examples/temporal/.dockerignore +10 -0
- openai_agents_python_providers-1.0.0/examples/temporal/Dockerfile +23 -0
- openai_agents_python_providers-1.0.0/examples/temporal/README.md +64 -0
- openai_agents_python_providers-1.0.0/examples/temporal/activities.py +60 -0
- openai_agents_python_providers-1.0.0/examples/temporal/docker-compose.yml +21 -0
- openai_agents_python_providers-1.0.0/examples/temporal/starter.py +34 -0
- openai_agents_python_providers-1.0.0/examples/temporal/worker.py +81 -0
- openai_agents_python_providers-1.0.0/examples/temporal/workflow.py +41 -0
- openai_agents_python_providers-1.0.0/pyproject.toml +107 -0
- openai_agents_python_providers-1.0.0/src/openai_agents_providers/__init__.py +35 -0
- openai_agents_python_providers-1.0.0/src/openai_agents_providers/llama_cpp_provider.py +90 -0
- openai_agents_python_providers-1.0.0/src/openai_agents_providers/ollama_provider.py +85 -0
- openai_agents_python_providers-1.0.0/src/openai_agents_providers/py.typed +1 -0
- openai_agents_python_providers-1.0.0/tests/test_providers.py +170 -0
- openai_agents_python_providers-1.0.0/uv.lock +2068 -0
--- /dev/null
+++ openai_agents_python_providers-1.0.0/.env.example
@@ -0,0 +1,11 @@
+# Temporal Configuration
+# Use 'host.docker.internal:7233' if running in Docker and Temporal is on the host
+TEMPORAL_ADDRESS=localhost:7233
+TASK_QUEUE=temporal-openai-agents-demo
+
+# Model Provider Configuration
+# PROVIDER_TYPE can be 'ollama' or 'llamacpp'
+PROVIDER_TYPE=ollama
+# Use 'http://host.docker.internal:11434/v1' if running in Docker and Ollama is on the host
+PROVIDER_URL=http://localhost:11434/v1
+MODEL_NAME=llama3.2
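This template drives every example in the package. As a minimal sketch of how a script could consume it, assuming only `python-dotenv` and the two documented provider classes (the helper name `make_provider` is illustrative; the real `examples/temporal/worker.py` ships later in this package and may differ):

```python
# Illustrative helper, not part of the package: pick a provider
# from the .env values shown above.
import os

from dotenv import load_dotenv

from openai_agents_providers import LlamaCppProvider, OllamaProvider


def make_provider():
    load_dotenv()  # reads a .env copied from .env.example
    base_url = os.getenv("PROVIDER_URL", "http://localhost:11434/v1")
    model = os.getenv("MODEL_NAME", "llama3.2")
    if os.getenv("PROVIDER_TYPE", "ollama") == "llamacpp":
        return LlamaCppProvider(base_url=base_url, model=model)
    return OllamaProvider(base_url=base_url, model=model)
```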
--- /dev/null
+++ openai_agents_python_providers-1.0.0/.github/workflows/ci.yml
@@ -0,0 +1,40 @@
+name: CI
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: uv sync --all-extras --group dev --group temporal
+
+      - name: Lint
+        run: uv run ruff check .
+
+      - name: Format check
+        run: uv run ruff format --check .
+
+      - name: Type check
+        run: uv run mypy src/
+
+      - name: Run tests
+        run: uv run python -m pytest -v
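The workflow runs the same five commands on every Python version in the matrix, so a failing build can be reproduced locally verbatim:

```bash
uv sync --all-extras --group dev --group temporal
uv run ruff check .
uv run ruff format --check .
uv run mypy src/
uv run python -m pytest -v
```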
--- /dev/null
+++ openai_agents_python_providers-1.0.0/.github/workflows/release.yml
@@ -0,0 +1,82 @@
+name: Release
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  release:
+    name: Semantic release
+    runs-on: ubuntu-latest
+    concurrency: release
+    permissions:
+      contents: write
+    outputs:
+      released: ${{ steps.version_check.outputs.will_release }}
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+
+      - name: Install dependencies
+        run: uv sync --all-extras --group dev --group temporal
+
+      - name: Check for releasable commits
+        id: version_check
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          version=$(uv run semantic-release version --print 2>/dev/null || true)
+          if [ -n "$version" ]; then
+            echo "will_release=true" >> $GITHUB_OUTPUT
+          else
+            echo "will_release=false" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Run semantic release
+        if: steps.version_check.outputs.will_release == 'true'
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: uv run semantic-release version --push
+
+      - name: Build package
+        if: steps.version_check.outputs.will_release == 'true'
+        run: uv build
+
+      - name: Upload build artifacts
+        if: steps.version_check.outputs.will_release == 'true'
+        uses: actions/upload-artifact@v4
+        with:
+          name: dist
+          path: dist/
+
+  publish:
+    name: Publish to PyPI
+    needs: release
+    if: needs.release.outputs.released == 'true'
+    runs-on: ubuntu-latest
+    environment: pypi
+    permissions:
+      id-token: write
+
+    steps:
+      - name: Download build artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: dist
+          path: dist/
+
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
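The gate step keys off `semantic-release version --print`, treating empty output as "nothing to release". The same probe can be run locally as a dry run before pushing:

```bash
# Per the workflow's own check, stdout carries the next version when a
# release is due and is empty otherwise; errors are ignored the same way.
version=$(uv run semantic-release version --print 2>/dev/null || true)
echo "${version:-no release pending}"
```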
--- /dev/null
+++ openai_agents_python_providers-1.0.0/.gitignore
@@ -0,0 +1,36 @@
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+.mypy_cache/
+.pytest_cache/
+.ruff_cache/
+.coverage
+htmlcov/
+.tox/
+*.log
+site/
--- /dev/null
+++ openai_agents_python_providers-1.0.0/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 Mike Toscano
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- /dev/null
+++ openai_agents_python_providers-1.0.0/PKG-INFO
@@ -0,0 +1,182 @@
+Metadata-Version: 2.4
+Name: openai-agents-python-providers
+Version: 1.0.0
+Summary: Ollama and llama.cpp providers for the OpenAI Agents SDK
+Project-URL: Homepage, https://github.com/GethosTheWalrus/openai-agents-python-providers
+Project-URL: Repository, https://github.com/GethosTheWalrus/openai-agents-python-providers
+Project-URL: Issues, https://github.com/GethosTheWalrus/openai-agents-python-providers/issues
+Author-email: Mike Toscano <mike@miketoscano.com>
+License-Expression: MIT
+License-File: LICENSE
+Keywords: agents,llama.cpp,llm,ollama,openai,providers
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Typing :: Typed
+Requires-Python: >=3.10
+Requires-Dist: openai-agents<1,>=0.16.0
+Requires-Dist: python-dotenv>=1.0.0
+Provides-Extra: temporal
+Requires-Dist: httpx>=0.27.0; extra == 'temporal'
+Requires-Dist: temporalio[openai-agents,opentelemetry]>=1.7.0; extra == 'temporal'
+Description-Content-Type: text/markdown
+
+# openai-agents-python-providers
+
+Community model providers for the [OpenAI Agents SDK](https://github.com/openai/openai-agents-python).
+
+Because OpenAI's SDK is intentionally focused on first-party integrations, this package provides ready-to-use `ModelProvider` implementations for locally-hosted and OpenAI-compatible backends:
+
+| Provider | Backend |
+|---|---|
+| `OllamaProvider` | [Ollama](https://ollama.com/) |
+| `LlamaCppProvider` | [llama.cpp](https://github.com/ggerganov/llama.cpp), [vLLM](https://github.com/vllm-project/vllm), and any OpenAI-compatible server |
+
+## Installation
+
+```bash
+pip install openai-agents-python-providers
+
+# or with temporal support
+pip install "openai-agents-python-providers[temporal]"
+```
+
+## Quickstart
+
+### Ollama
+
+Make sure Ollama is running and you have a model pulled:
+
+```python
+import asyncio
+import os
+from agents import Agent, Runner, RunConfig
+from openai_agents_providers import OllamaProvider
+
+# Configure via environment or parameters
+provider = OllamaProvider(
+    model=os.getenv("MODEL_NAME", "llama3.2"),
+    base_url=os.getenv("PROVIDER_URL", "http://localhost:11434/v1")
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions="You are a helpful assistant.",
+)
+
+async def main():
+    result = await Runner.run(
+        agent,
+        "What is the capital of France?",
+        run_config=RunConfig(model_provider=provider),
+    )
+    print(result.final_output)
+
+asyncio.run(main())
+```
+
+### llama.cpp
+
+Start a llama.cpp server:
+
+```bash
+llama-server --model my-model.gguf --port 8080
+```
+
+```python
+import asyncio
+import os
+from agents import Agent, Runner, RunConfig
+from openai_agents_providers import LlamaCppProvider
+
+provider = LlamaCppProvider(
+    base_url=os.getenv("PROVIDER_URL", "http://localhost:8080/v1"),
+    model=os.getenv("MODEL_NAME"),  # optional
+    api_key="sk-anything",
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions="You are a helpful assistant.",
+)
+
+async def main():
+    result = await Runner.run(
+        agent,
+        "Explain quantum entanglement in one sentence.",
+        run_config=RunConfig(model_provider=provider),
+    )
+    print(result.final_output)
+
+asyncio.run(main())
+```
+
+### Temporal Integration
+
+This package works seamlessly with the [Temporal OpenAI Agents Plugin](https://github.com/temporalio/sdk-python/tree/main/temporalio/contrib/openai_agents). You can use local providers like `OllamaProvider` or `LlamaCppProvider` while running agents durably in Temporal workflows.
+
+See [examples/temporal/](examples/temporal/) for a complete "tool-as-activity" demonstration.
+
+```bash
+# Install temporal dependencies
+uv sync --group temporal
+
+# Start the worker (pointing to your infrastructure)
+TEMPORAL_ADDRESS="temporal.example.com:7233" \
+PROVIDER_TYPE="ollama" \
+MODEL_NAME="llama3.2" \
+uv run examples/temporal/worker.py
+
+# Start the workflow
+TEMPORAL_ADDRESS="temporal.example.com:7233" \
+uv run examples/temporal/starter.py "What is the weather where I am?"
+```
+
+## API Reference
+
+### `OllamaProvider`
+
+```python
+OllamaProvider(
+    *,
+    base_url: str = "http://localhost:11434/v1",
+    model: str | None = None,
+    api_key: str = "ollama",
+    **kwargs,  # forwarded to AsyncOpenAI
+)
+```
+
+| Parameter | Default | Description |
+|---|---|---|
+| `base_url` | `http://localhost:11434/v1` | Ollama API base URL |
+| `model` | `None` | Model name (e.g. `"llama3.2"`, `"qwen3:8b"`). Overrides any name passed by the agent. |
+| `api_key` | `"ollama"` | Ignored by Ollama; required by the OpenAI SDK. |
+
+### `LlamaCppProvider`
+
+```python
+LlamaCppProvider(
+    *,
+    base_url: str,
+    model: str | None = None,
+    api_key: str = "sk-anything",
+    **kwargs,  # forwarded to AsyncOpenAI
+)
+```
+
+| Parameter | Default | Description |
+|---|---|---|
+| `base_url` | *(required)* | OpenAI-compatible API base URL, e.g. `http://localhost:8080/v1`. |
+| `model` | `None` | Model name. Overrides any name passed by the agent. |
+| `api_key` | `"sk-anything"` | Ignored by most backends; required by the OpenAI SDK. |
+
+## License
+
+MIT
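Both signatures above describe the same shape: a thin `ModelProvider` wrapping an `AsyncOpenAI` client. For orientation, here is a hedged sketch of that pattern, not the packaged code (the real implementation lives in `src/openai_agents_providers/ollama_provider.py` further down in this diff and may differ in detail):

```python
# Sketch only: approximates the documented behavior of OllamaProvider.
from openai import AsyncOpenAI

from agents import OpenAIChatCompletionsModel
from agents.models.interface import Model, ModelProvider


class OllamaProviderSketch(ModelProvider):
    def __init__(
        self,
        *,
        base_url: str = "http://localhost:11434/v1",
        model: str | None = None,
        api_key: str = "ollama",
        **kwargs,
    ) -> None:
        # Extra kwargs are forwarded to AsyncOpenAI, per the API reference.
        self._client = AsyncOpenAI(base_url=base_url, api_key=api_key, **kwargs)
        self._model = model

    def get_model(self, model_name: str | None) -> Model:
        # A configured model overrides any name passed by the agent.
        return OpenAIChatCompletionsModel(
            model=self._model or model_name or "llama3.2",
            openai_client=self._client,
        )
```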
--- /dev/null
+++ openai_agents_python_providers-1.0.0/README.md
@@ -0,0 +1,152 @@
+# openai-agents-python-providers
+
+Community model providers for the [OpenAI Agents SDK](https://github.com/openai/openai-agents-python).
+
+Because OpenAI's SDK is intentionally focused on first-party integrations, this package provides ready-to-use `ModelProvider` implementations for locally-hosted and OpenAI-compatible backends:
+
+| Provider | Backend |
+|---|---|
+| `OllamaProvider` | [Ollama](https://ollama.com/) |
+| `LlamaCppProvider` | [llama.cpp](https://github.com/ggerganov/llama.cpp), [vLLM](https://github.com/vllm-project/vllm), and any OpenAI-compatible server |
+
+## Installation
+
+```bash
+pip install openai-agents-python-providers
+
+# or with temporal support
+pip install "openai-agents-python-providers[temporal]"
+```
+
+## Quickstart
+
+### Ollama
+
+Make sure Ollama is running and you have a model pulled:
+
+```python
+import asyncio
+import os
+from agents import Agent, Runner, RunConfig
+from openai_agents_providers import OllamaProvider
+
+# Configure via environment or parameters
+provider = OllamaProvider(
+    model=os.getenv("MODEL_NAME", "llama3.2"),
+    base_url=os.getenv("PROVIDER_URL", "http://localhost:11434/v1")
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions="You are a helpful assistant.",
+)
+
+async def main():
+    result = await Runner.run(
+        agent,
+        "What is the capital of France?",
+        run_config=RunConfig(model_provider=provider),
+    )
+    print(result.final_output)
+
+asyncio.run(main())
+```
+
+### llama.cpp
+
+Start a llama.cpp server:
+
+```bash
+llama-server --model my-model.gguf --port 8080
+```
+
+```python
+import asyncio
+import os
+from agents import Agent, Runner, RunConfig
+from openai_agents_providers import LlamaCppProvider
+
+provider = LlamaCppProvider(
+    base_url=os.getenv("PROVIDER_URL", "http://localhost:8080/v1"),
+    model=os.getenv("MODEL_NAME"),  # optional
+    api_key="sk-anything",
+)
+
+agent = Agent(
+    name="Assistant",
+    instructions="You are a helpful assistant.",
+)
+
+async def main():
+    result = await Runner.run(
+        agent,
+        "Explain quantum entanglement in one sentence.",
+        run_config=RunConfig(model_provider=provider),
+    )
+    print(result.final_output)
+
+asyncio.run(main())
+```
+
+### Temporal Integration
+
+This package works seamlessly with the [Temporal OpenAI Agents Plugin](https://github.com/temporalio/sdk-python/tree/main/temporalio/contrib/openai_agents). You can use local providers like `OllamaProvider` or `LlamaCppProvider` while running agents durably in Temporal workflows.
+
+See [examples/temporal/](examples/temporal/) for a complete "tool-as-activity" demonstration.
+
+```bash
+# Install temporal dependencies
+uv sync --group temporal
+
+# Start the worker (pointing to your infrastructure)
+TEMPORAL_ADDRESS="temporal.example.com:7233" \
+PROVIDER_TYPE="ollama" \
+MODEL_NAME="llama3.2" \
+uv run examples/temporal/worker.py
+
+# Start the workflow
+TEMPORAL_ADDRESS="temporal.example.com:7233" \
+uv run examples/temporal/starter.py "What is the weather where I am?"
+```
+
+## API Reference
+
+### `OllamaProvider`
+
+```python
+OllamaProvider(
+    *,
+    base_url: str = "http://localhost:11434/v1",
+    model: str | None = None,
+    api_key: str = "ollama",
+    **kwargs,  # forwarded to AsyncOpenAI
+)
+```
+
+| Parameter | Default | Description |
+|---|---|---|
+| `base_url` | `http://localhost:11434/v1` | Ollama API base URL |
+| `model` | `None` | Model name (e.g. `"llama3.2"`, `"qwen3:8b"`). Overrides any name passed by the agent. |
+| `api_key` | `"ollama"` | Ignored by Ollama; required by the OpenAI SDK. |
+
+### `LlamaCppProvider`
+
+```python
+LlamaCppProvider(
+    *,
+    base_url: str,
+    model: str | None = None,
+    api_key: str = "sk-anything",
+    **kwargs,  # forwarded to AsyncOpenAI
+)
+```
+
+| Parameter | Default | Description |
+|---|---|---|
+| `base_url` | *(required)* | OpenAI-compatible API base URL, e.g. `http://localhost:8080/v1`. |
+| `model` | `None` | Model name. Overrides any name passed by the agent. |
+| `api_key` | `"sk-anything"` | Ignored by most backends; required by the OpenAI SDK. |
+
+## License
+
+MIT
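The Temporal demo's prompt ("What is the weather where I am?") presupposes a weather tool. As a self-contained, non-Temporal illustration, and assuming the local model supports tool calling, the SDK's `function_tool` decorator combines with either provider like so (`get_weather` is illustrative, not shipped with this package):

```python
import asyncio

from agents import Agent, RunConfig, Runner, function_tool

from openai_agents_providers import OllamaProvider


@function_tool
def get_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"It is sunny in {city}."


async def main() -> None:
    provider = OllamaProvider(model="llama3.2")
    agent = Agent(
        name="Assistant",
        instructions="Use the weather tool to answer weather questions.",
        tools=[get_weather],
    )
    result = await Runner.run(
        agent,
        "What is the weather in Paris?",
        run_config=RunConfig(model_provider=provider),
    )
    print(result.final_output)


asyncio.run(main())
```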
--- /dev/null
+++ openai_agents_python_providers-1.0.0/examples/README.md
@@ -0,0 +1,76 @@
+# Examples
+
+This directory contains examples of how to use the `openai-agents-python-providers` package.
+
+## Configuration
+
+All examples can be configured using environment variables. You can set these directly in your shell or use a `.env` file in the project root (see `.env.example` for a template).
+
+| Variable | Default | Description |
+|---|---|---|
+| `MODEL_NAME` | `llama3.2` | The LLM model name. |
+| `PROVIDER_URL` | *(varies)* | The API endpoint for the provider (Ollama or llama.cpp). |
+| `TEMPORAL_ADDRESS` | `localhost:7233` | Address of the Temporal server (Temporal demo only). |
+
+## Standalone Examples
+
+Located in `examples/standalone/`. These demonstrate using the providers directly with the OpenAI Agents SDK in a standard Python script.
+
+### Ollama
+
+```bash
+# Optional: override default model or URL
+export MODEL_NAME="qwen2.5:7b"
+export PROVIDER_URL="http://192.168.1.10:11434/v1"
+
+uv run examples/standalone/ollama_example.py
+```
+
+### llama.cpp
+
+```bash
+# Optional: override default URL
+export PROVIDER_URL="http://localhost:8080/v1"
+
+uv run examples/standalone/llama_cpp_example.py
+```
+
+---
+
+## Temporal Integration Example
+
+This is a comprehensive "tool-as-activity" demonstration showing how to run durable agents in [Temporal](https://temporal.io/).
+
+**Location:** [examples/temporal/](temporal/)
+
+### Quick Start (Local)
+
+```bash
+# Install temporal dependencies via extra
+pip install "openai-agents-python-providers[temporal]"
+
+# Or if using uv locally
+uv sync --group temporal
+
+# Start the worker (configure as needed)
+TEMPORAL_ADDRESS="temporal.example.com:7233" \
+PROVIDER_TYPE="ollama" \
+MODEL_NAME="llama3.2" \
+uv run examples/temporal/worker.py
+
+# Start the workflow
+TEMPORAL_ADDRESS="temporal.example.com:7233" \
+uv run examples/temporal/starter.py "What is the weather where I am?"
+```
+
+### Dockerized Test
+
+If you have Temporal or model providers running externally, you can run the demo in Docker:
+
+```bash
+cd examples/temporal
+docker-compose up --build worker
+docker-compose run --rm starter "What is the weather where I am?"
+```
+
+See the [Temporal README](temporal/README.md) for more configuration options.
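For the `.env` route instead of inline exports, copy the template and adjust it; the Docker-specific values come straight from the comments in `.env.example`:

```bash
cp .env.example .env
# When the worker runs in Docker but Temporal/Ollama run on the host,
# .env.example suggests these values in place of localhost:
#   TEMPORAL_ADDRESS=host.docker.internal:7233
#   PROVIDER_URL=http://host.docker.internal:11434/v1
```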
--- /dev/null
+++ openai_agents_python_providers-1.0.0/examples/standalone/llama_cpp_example.py
@@ -0,0 +1,47 @@
+"""Example: use LlamaCppProvider to run an agent against a local llama.cpp server.
+
+Prerequisites:
+1. Install llama.cpp: https://github.com/ggerganov/llama.cpp
+2. Start the server:
+   llama-server --model /path/to/your-model.gguf --port 8080
+3. Install this package: pip install openai-agents-python-providers
+"""
+
+import asyncio
+import os
+
+from agents import Agent, RunConfig, Runner
+from dotenv import load_dotenv
+
+from openai_agents_providers import LlamaCppProvider
+
+
+async def main() -> None:
+    load_dotenv()
+    # Use environment variables for configuration
+    model = os.getenv("MODEL_NAME")  # optional for llama.cpp
+    base_url = os.getenv("PROVIDER_URL", "http://localhost:8080/v1")
+
+    print(f"Using model: {model or 'server default'} at {base_url}")
+
+    provider = LlamaCppProvider(
+        base_url=base_url,
+        model=model,
+        api_key="sk-anything",
+    )
+
+    agent = Agent(
+        name="Assistant",
+        instructions="You are a concise, helpful assistant.",
+    )
+
+    result = await Runner.run(
+        agent,
+        "Explain the phrase 'to err is human' in one sentence.",
+        run_config=RunConfig(model_provider=provider),
+    )
+    print(f"Result: {result.final_output}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
--- /dev/null
+++ openai_agents_python_providers-1.0.0/examples/standalone/ollama_example.py
@@ -0,0 +1,42 @@
+"""Example: use OllamaProvider to run an agent against a local Ollama server.
+
+Prerequisites:
+1. Install Ollama: https://ollama.com/
+2. Pull a model: ollama pull llama3.2
+3. Install this package: pip install openai-agents-python-providers
+"""
+
+import asyncio
+import os
+
+from agents import Agent, RunConfig, Runner
+from dotenv import load_dotenv
+
+from openai_agents_providers import OllamaProvider
+
+
+async def main() -> None:
+    load_dotenv()
+    # Use environment variables for configuration
+    model = os.getenv("MODEL_NAME", "llama3.2")
+    base_url = os.getenv("PROVIDER_URL", "http://localhost:11434/v1")
+
+    print(f"Using model: {model} at {base_url}")
+
+    provider = OllamaProvider(model=model, base_url=base_url)
+
+    agent = Agent(
+        name="Assistant",
+        instructions="You are a concise, helpful assistant.",
+    )
+
+    result = await Runner.run(
+        agent,
+        "What is the capital of France? Answer in one word.",
+        run_config=RunConfig(model_provider=provider),
+    )
+    print(f"Result: {result.final_output}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())