openai-agents-python-providers 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_agents_providers/__init__.py +35 -0
- openai_agents_providers/llama_cpp_provider.py +90 -0
- openai_agents_providers/ollama_provider.py +85 -0
- openai_agents_providers/py.typed +1 -0
- openai_agents_python_providers-1.0.0.dist-info/METADATA +182 -0
- openai_agents_python_providers-1.0.0.dist-info/RECORD +8 -0
- openai_agents_python_providers-1.0.0.dist-info/WHEEL +4 -0
- openai_agents_python_providers-1.0.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""Community model providers for the OpenAI Agents SDK.

Ready-to-use :class:`~agents.ModelProvider` implementations targeting
locally-hosted and OpenAI-compatible backends:

- :class:`~openai_agents_providers.OllamaProvider` — Ollama (``http://localhost:11434/v1``)
- :class:`~openai_agents_providers.LlamaCppProvider` — llama.cpp, vLLM, and any
  other OpenAI-compatible server

Usage example::

    import os
    from agents import Agent, Runner, RunConfig
    from openai_agents_providers import OllamaProvider

    provider = OllamaProvider(
        model=os.getenv("MODEL_NAME", "llama3.2"),
        base_url=os.getenv("PROVIDER_URL", "http://localhost:11434/v1")
    )
    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
    result = await Runner.run(
        agent,
        "What is the capital of France?",
        run_config=RunConfig(model_provider=provider),
    )
    print(result.final_output)
"""

from .llama_cpp_provider import LlamaCppProvider
from .ollama_provider import OllamaProvider

__all__ = [
    "LlamaCppProvider",
    "OllamaProvider",
]
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
"""Provider for llama.cpp and other OpenAI-compatible servers (vLLM, Ollama, etc.)."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from agents.models.interface import Model, ModelProvider
|
|
6
|
+
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
|
|
7
|
+
from openai import AsyncOpenAI
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class LlamaCppProvider(ModelProvider):
    """A model provider for llama.cpp and other OpenAI-compatible servers.

    This provider creates models that use the OpenAI Chat Completions API
    against any compatible backend (llama.cpp, vLLM, Ollama, etc.).

    Args:
        base_url: The OpenAI-compatible API base URL. Must end with ``/v1``.
            Examples:

            - ``http://localhost:8080/v1`` (llama.cpp server)
            - ``http://localhost:11434/v1`` (Ollama)
            - ``https://your-vllm-instance.com/v1`` (vLLM)

        model: The model name to use. Passed to every model instance created
            by this provider. If ``None``, the backend's default model is used.
        api_key: API key. Most OpenAI-compatible servers ignore this but the
            OpenAI SDK requires it. Use anything (e.g. ``"sk-anything"``).
        **kwargs: Additional keyword arguments passed to ``AsyncOpenAI``.

    Example:
        >>> provider = LlamaCppProvider(
        ...     base_url="http://localhost:8080/v1",
        ...     model="qwen3.6-35b",
        ...     api_key="sk-anything",
        ... )
        >>> agent = Agent(name="Assistant")
        >>> config = RunConfig(model_provider=provider)
        >>> result = await Runner.run(agent, "Hello!", run_config=config)
    """

    def __init__(
        self,
        *,
        base_url: str,
        model: str | None = None,
        api_key: str = "sk-anything",
        **kwargs,
    ) -> None:
        """Initialize the LlamaCpp provider.

        Args:
            base_url: The OpenAI-compatible API base URL (must end with /v1).
            model: The model name to use. If ``None``, the backend's default is used.
            api_key: API key (required by OpenAI SDK, usually ignored by the backend).
            **kwargs: Additional arguments passed to ``AsyncOpenAI``.
        """
        self._base_url = base_url
        self._model = model
        self._api_key = api_key
        self._kwargs = kwargs
        # Client is created lazily on first use (see _get_client).
        self._client: AsyncOpenAI | None = None

    def _get_client(self) -> AsyncOpenAI:
        """Return the shared client, creating it on first use."""
        if self._client is None:
            self._client = AsyncOpenAI(
                base_url=self._base_url,
                api_key=self._api_key,
                # Local/self-hosted backends: fail fast rather than retry.
                max_retries=0,
                **self._kwargs,
            )
        return self._client

    def get_model(self, model_name: str | None) -> Model:
        """Get a model instance.

        Args:
            model_name: The model name requested by the agent. If this provider
                was constructed with a ``model`` argument, that value takes
                precedence and ``model_name`` is ignored.

        Returns:
            An ``OpenAIChatCompletionsModel`` instance pointing at the configured base URL.
        """
        resolved_model = self._model or model_name or "default"
        return OpenAIChatCompletionsModel(model=resolved_model, openai_client=self._get_client())

    async def aclose(self) -> None:
        """Close the underlying OpenAI client.

        Safe to call multiple times. After closing, a later ``get_model``
        call transparently creates a fresh client.
        """
        if self._client is not None:
            await self._client.close()
            # Drop the closed client so the provider remains usable:
            # without this, a subsequent get_model() would hand out a
            # model bound to a closed HTTP client.
            self._client = None
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""Provider for Ollama.
|
|
2
|
+
|
|
3
|
+
Ollama exposes an OpenAI-compatible API at /v1/ when started with the --api flag.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from agents.models.interface import Model, ModelProvider
|
|
9
|
+
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
|
|
10
|
+
from openai import AsyncOpenAI
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class OllamaProvider(ModelProvider):
    """A model provider for Ollama.

    Ollama serves an OpenAI-compatible Chat Completions API at ``/v1``
    by default — no extra flag is required on current releases.

    This provider creates models that use the OpenAI Chat Completions API
    against the Ollama server.

    Args:
        base_url: The Ollama API base URL. Defaults to ``http://localhost:11434/v1``.
        model: The Ollama model name to use (e.g. ``"llama3.2"``, ``"qwen3:8b"``).
            If ``None``, Ollama's default model is used.
        api_key: API key. Ollama ignores this but the OpenAI SDK requires it.
        **kwargs: Additional keyword arguments passed to ``AsyncOpenAI``.

    Example:
        >>> provider = OllamaProvider(model="llama3.2")
        >>> agent = Agent(name="Assistant")
        >>> config = RunConfig(model_provider=provider)
        >>> result = await Runner.run(agent, "Hello!", run_config=config)
    """

    def __init__(
        self,
        *,
        base_url: str = "http://localhost:11434/v1",
        model: str | None = None,
        api_key: str = "ollama",
        **kwargs,
    ) -> None:
        """Initialize the Ollama provider.

        Args:
            base_url: The Ollama API base URL. Defaults to Ollama's default.
            model: The Ollama model name to use. If ``None``, Ollama's default is used.
            api_key: API key (required by OpenAI SDK, Ollama ignores it).
            **kwargs: Additional arguments passed to ``AsyncOpenAI``.
        """
        self._base_url = base_url
        self._model = model
        self._api_key = api_key
        self._kwargs = kwargs
        # Client is created lazily on first use (see _get_client).
        self._client: AsyncOpenAI | None = None

    def _get_client(self) -> AsyncOpenAI:
        """Return the shared client, creating it on first use."""
        if self._client is None:
            self._client = AsyncOpenAI(
                base_url=self._base_url,
                api_key=self._api_key,
                # Local server: fail fast rather than retry.
                max_retries=0,
                **self._kwargs,
            )
        return self._client

    def get_model(self, model_name: str | None) -> Model:
        """Get a model instance.

        Args:
            model_name: The model name requested by the agent. If this provider
                was constructed with a ``model`` argument, that value takes
                precedence and ``model_name`` is ignored.

        Returns:
            An ``OpenAIChatCompletionsModel`` instance pointing at Ollama.
        """
        resolved_model = self._model or model_name or "default"
        return OpenAIChatCompletionsModel(model=resolved_model, openai_client=self._get_client())

    async def aclose(self) -> None:
        """Close the underlying OpenAI client.

        Safe to call multiple times. After closing, a later ``get_model``
        call transparently creates a fresh client.
        """
        if self._client is not None:
            await self._client.close()
            # Drop the closed client so the provider remains usable:
            # without this, a subsequent get_model() would hand out a
            # model bound to a closed HTTP client.
            self._client = None
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Type stubs marker for openai-agents-python-providers."""
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: openai-agents-python-providers
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Ollama and llama.cpp providers for the OpenAI Agents SDK
|
|
5
|
+
Project-URL: Homepage, https://github.com/GethosTheWalrus/openai-agents-python-providers
|
|
6
|
+
Project-URL: Repository, https://github.com/GethosTheWalrus/openai-agents-python-providers
|
|
7
|
+
Project-URL: Issues, https://github.com/GethosTheWalrus/openai-agents-python-providers/issues
|
|
8
|
+
Author-email: Mike Toscano <mike@miketoscano.com>
|
|
9
|
+
License-Expression: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: agents,llama.cpp,llm,ollama,openai,providers
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Operating System :: OS Independent
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
21
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
22
|
+
Classifier: Typing :: Typed
|
|
23
|
+
Requires-Python: >=3.10
|
|
24
|
+
Requires-Dist: openai-agents<1,>=0.16.0
|
|
25
|
+
Requires-Dist: python-dotenv>=1.0.0
|
|
26
|
+
Provides-Extra: temporal
|
|
27
|
+
Requires-Dist: httpx>=0.27.0; extra == 'temporal'
|
|
28
|
+
Requires-Dist: temporalio[openai-agents,opentelemetry]>=1.7.0; extra == 'temporal'
|
|
29
|
+
Description-Content-Type: text/markdown
|
|
30
|
+
|
|
31
|
+
# openai-agents-python-providers
|
|
32
|
+
|
|
33
|
+
Community model providers for the [OpenAI Agents SDK](https://github.com/openai/openai-agents-python).
|
|
34
|
+
|
|
35
|
+
Because OpenAI's SDK is intentionally focused on first-party integrations, this package provides ready-to-use `ModelProvider` implementations for locally-hosted and OpenAI-compatible backends:
|
|
36
|
+
|
|
37
|
+
| Provider | Backend |
|
|
38
|
+
|---|---|
|
|
39
|
+
| `OllamaProvider` | [Ollama](https://ollama.com/) |
|
|
40
|
+
| `LlamaCppProvider` | [llama.cpp](https://github.com/ggerganov/llama.cpp), [vLLM](https://github.com/vllm-project/vllm), and any OpenAI-compatible server |
|
|
41
|
+
|
|
42
|
+
## Installation
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install openai-agents-python-providers
|
|
46
|
+
|
|
47
|
+
# or with temporal support
|
|
48
|
+
pip install "openai-agents-python-providers[temporal]"
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Quickstart
|
|
52
|
+
|
|
53
|
+
### Ollama
|
|
54
|
+
|
|
55
|
+
Make sure Ollama is running and you have a model pulled:
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
import asyncio
|
|
59
|
+
import os
|
|
60
|
+
from agents import Agent, Runner, RunConfig
|
|
61
|
+
from openai_agents_providers import OllamaProvider
|
|
62
|
+
|
|
63
|
+
# Configure via environment or parameters
|
|
64
|
+
provider = OllamaProvider(
|
|
65
|
+
model=os.getenv("MODEL_NAME", "llama3.2"),
|
|
66
|
+
base_url=os.getenv("PROVIDER_URL", "http://localhost:11434/v1")
|
|
67
|
+
)
|
|
68
|
+
|
|
69
|
+
agent = Agent(
|
|
70
|
+
name="Assistant",
|
|
71
|
+
instructions="You are a helpful assistant.",
|
|
72
|
+
)
|
|
73
|
+
|
|
74
|
+
async def main():
|
|
75
|
+
result = await Runner.run(
|
|
76
|
+
agent,
|
|
77
|
+
"What is the capital of France?",
|
|
78
|
+
run_config=RunConfig(model_provider=provider),
|
|
79
|
+
)
|
|
80
|
+
print(result.final_output)
|
|
81
|
+
|
|
82
|
+
asyncio.run(main())
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
### llama.cpp
|
|
86
|
+
|
|
87
|
+
Start a llama.cpp server:
|
|
88
|
+
|
|
89
|
+
```bash
|
|
90
|
+
llama-server --model my-model.gguf --port 8080
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
```python
|
|
94
|
+
import asyncio
|
|
95
|
+
import os
|
|
96
|
+
from agents import Agent, Runner, RunConfig
|
|
97
|
+
from openai_agents_providers import LlamaCppProvider
|
|
98
|
+
|
|
99
|
+
provider = LlamaCppProvider(
|
|
100
|
+
base_url=os.getenv("PROVIDER_URL", "http://localhost:8080/v1"),
|
|
101
|
+
model=os.getenv("MODEL_NAME"), # optional
|
|
102
|
+
api_key="sk-anything",
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
agent = Agent(
|
|
106
|
+
name="Assistant",
|
|
107
|
+
instructions="You are a helpful assistant.",
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
async def main():
|
|
111
|
+
result = await Runner.run(
|
|
112
|
+
agent,
|
|
113
|
+
"Explain quantum entanglement in one sentence.",
|
|
114
|
+
run_config=RunConfig(model_provider=provider),
|
|
115
|
+
)
|
|
116
|
+
print(result.final_output)
|
|
117
|
+
|
|
118
|
+
asyncio.run(main())
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
### Temporal Integration
|
|
122
|
+
|
|
123
|
+
This package works seamlessly with the [Temporal OpenAI Agents Plugin](https://github.com/temporalio/sdk-python/tree/main/temporalio/contrib/openai_agents). You can use local providers like `OllamaProvider` or `LlamaCppProvider` while running agents durably in Temporal workflows.
|
|
124
|
+
|
|
125
|
+
See [examples/temporal/](examples/temporal/) for a complete "tool-as-activity" demonstration.
|
|
126
|
+
|
|
127
|
+
```bash
|
|
128
|
+
# Install temporal dependencies
|
|
129
|
+
uv sync --group temporal
|
|
130
|
+
|
|
131
|
+
# Start the worker (pointing to your infrastructure)
|
|
132
|
+
TEMPORAL_ADDRESS="temporal.example.com:7233" \
|
|
133
|
+
PROVIDER_TYPE="ollama" \
|
|
134
|
+
MODEL_NAME="llama3.2" \
|
|
135
|
+
uv run examples/temporal/worker.py
|
|
136
|
+
|
|
137
|
+
# Start the workflow
|
|
138
|
+
TEMPORAL_ADDRESS="temporal.example.com:7233" \
|
|
139
|
+
uv run examples/temporal/starter.py "What is the weather where I am?"
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
## API Reference
|
|
143
|
+
|
|
144
|
+
### `OllamaProvider`
|
|
145
|
+
|
|
146
|
+
```python
|
|
147
|
+
OllamaProvider(
|
|
148
|
+
*,
|
|
149
|
+
base_url: str = "http://localhost:11434/v1",
|
|
150
|
+
model: str | None = None,
|
|
151
|
+
api_key: str = "ollama",
|
|
152
|
+
**kwargs, # forwarded to AsyncOpenAI
|
|
153
|
+
)
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
| Parameter | Default | Description |
|
|
157
|
+
|---|---|---|
|
|
158
|
+
| `base_url` | `http://localhost:11434/v1` | Ollama API base URL |
|
|
159
|
+
| `model` | `None` | Model name (e.g. `"llama3.2"`, `"qwen3:8b"`). Overrides any name passed by the agent. |
|
|
160
|
+
| `api_key` | `"ollama"` | Ignored by Ollama; required by the OpenAI SDK. |
|
|
161
|
+
|
|
162
|
+
### `LlamaCppProvider`
|
|
163
|
+
|
|
164
|
+
```python
|
|
165
|
+
LlamaCppProvider(
|
|
166
|
+
*,
|
|
167
|
+
base_url: str,
|
|
168
|
+
model: str | None = None,
|
|
169
|
+
api_key: str = "sk-anything",
|
|
170
|
+
**kwargs, # forwarded to AsyncOpenAI
|
|
171
|
+
)
|
|
172
|
+
```
|
|
173
|
+
|
|
174
|
+
| Parameter | Default | Description |
|
|
175
|
+
|---|---|---|
|
|
176
|
+
| `base_url` | *(required)* | OpenAI-compatible API base URL, e.g. `http://localhost:8080/v1`. |
|
|
177
|
+
| `model` | `None` | Model name. Overrides any name passed by the agent. |
|
|
178
|
+
| `api_key` | `"sk-anything"` | Ignored by most backends; required by the OpenAI SDK. |
|
|
179
|
+
|
|
180
|
+
## License
|
|
181
|
+
|
|
182
|
+
MIT
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
openai_agents_providers/__init__.py,sha256=BctulqLHYuRo2Nb44zPxC9FMHrb5TMht8u0sg5Huhs4,1103
|
|
2
|
+
openai_agents_providers/llama_cpp_provider.py,sha256=WWb9_lgZgMR82jdAlP3M3YZbbdIrnjMNwiXosQusj9U,3421
|
|
3
|
+
openai_agents_providers/ollama_provider.py,sha256=pDGyekG7y0CYsPu0F0Hv4nflvFeyPyYpSBGASOYVFZ0,3081
|
|
4
|
+
openai_agents_providers/py.typed,sha256=p3UHlJheWvOOVAC2IYgJT_mpGMZ7oX96qmJf_e5vIIo,60
|
|
5
|
+
openai_agents_python_providers-1.0.0.dist-info/METADATA,sha256=lH0ynOSCQhgsQzb0QslZeXrkFLvUIW4FXRBZ5Yedkls,5463
|
|
6
|
+
openai_agents_python_providers-1.0.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
7
|
+
openai_agents_python_providers-1.0.0.dist-info/licenses/LICENSE,sha256=sUfy39jvRigVcRBv5vgjxTaUsTxWl5WdAZwN_zENKMI,1069
|
|
8
|
+
openai_agents_python_providers-1.0.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Mike Toscano
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|