openai-sdk-helpers 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/prompt/summarizer.jinja +7 -0
- openai_sdk_helpers/prompt/translator.jinja +7 -0
- openai_sdk_helpers/prompt/validator.jinja +7 -0
- openai_sdk_helpers/py.typed +0 -0
- {openai_sdk_helpers-0.0.2.dist-info → openai_sdk_helpers-0.0.3.dist-info}/METADATA +57 -4
- openai_sdk_helpers-0.0.3.dist-info/RECORD +8 -0
- openai_sdk_helpers/__init__.py +0 -34
- openai_sdk_helpers/agent/__init__.py +0 -23
- openai_sdk_helpers/agent/base.py +0 -432
- openai_sdk_helpers/agent/config.py +0 -66
- openai_sdk_helpers/agent/project_manager.py +0 -416
- openai_sdk_helpers/agent/runner.py +0 -117
- openai_sdk_helpers/agent/utils.py +0 -47
- openai_sdk_helpers/agent/vector_search.py +0 -418
- openai_sdk_helpers/agent/web_search.py +0 -404
- openai_sdk_helpers/config.py +0 -141
- openai_sdk_helpers/enums/__init__.py +0 -7
- openai_sdk_helpers/enums/base.py +0 -17
- openai_sdk_helpers/environment.py +0 -27
- openai_sdk_helpers/prompt/__init__.py +0 -77
- openai_sdk_helpers/response/__init__.py +0 -16
- openai_sdk_helpers/response/base.py +0 -477
- openai_sdk_helpers/response/messages.py +0 -211
- openai_sdk_helpers/response/runner.py +0 -42
- openai_sdk_helpers/response/tool_call.py +0 -70
- openai_sdk_helpers/structure/__init__.py +0 -57
- openai_sdk_helpers/structure/base.py +0 -591
- openai_sdk_helpers/structure/plan/__init__.py +0 -13
- openai_sdk_helpers/structure/plan/enum.py +0 -48
- openai_sdk_helpers/structure/plan/plan.py +0 -104
- openai_sdk_helpers/structure/plan/task.py +0 -122
- openai_sdk_helpers/structure/prompt.py +0 -24
- openai_sdk_helpers/structure/responses.py +0 -148
- openai_sdk_helpers/structure/summary.py +0 -65
- openai_sdk_helpers/structure/vector_search.py +0 -82
- openai_sdk_helpers/structure/web_search.py +0 -46
- openai_sdk_helpers/utils/__init__.py +0 -13
- openai_sdk_helpers/utils/core.py +0 -208
- openai_sdk_helpers/vector_storage/__init__.py +0 -15
- openai_sdk_helpers/vector_storage/cleanup.py +0 -91
- openai_sdk_helpers/vector_storage/storage.py +0 -501
- openai_sdk_helpers/vector_storage/types.py +0 -58
- openai_sdk_helpers-0.0.2.dist-info/RECORD +0 -40
- {openai_sdk_helpers-0.0.2.dist-info → openai_sdk_helpers-0.0.3.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.2.dist-info → openai_sdk_helpers-0.0.3.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/prompt/summarizer.jinja
@@ -0,0 +1,7 @@
+You are a concise assistant that summarizes long-form text into crisp findings.
+
+Instructions:
+- Focus on factual statements present in the source text.
+- Prefer short bullet points over paragraphs.
+- Call out key entities, dates, and figures when present.
+- Note any missing context or unanswered questions.
openai_sdk_helpers/prompt/translator.jinja
@@ -0,0 +1,7 @@
+You are a professional translator.
+
+Instructions:
+- Rewrite the provided text into the requested target language.
+- Preserve the intent and meaning of the original content.
+- Keep terminology consistent and avoid embellishment.
+- If the input text is empty, respond with "No text provided."
openai_sdk_helpers/prompt/validator.jinja
@@ -0,0 +1,7 @@
+You are a meticulous safety validator that enforces product guardrails.
+
+Instructions:
+- Inspect the provided user_input and optional agent_output for policy violations, safety risks, or missing disclaimers.
+- Highlight any prohibited content (violence, hate, sensitive data, PII) and note why it violates guardrails.
+- Recommend concise remediation steps (redaction, refusal, rephrasing) and provide sanitized_output when possible.
+- If everything is within guardrails, confirm both input_safe and output_safe are true and keep violations empty.
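For orientation, here is a minimal sketch of how one of these packaged templates could be loaded and rendered. It reads the file with `importlib.resources` and renders it with Jinja2 directly; the library's own `PromptRenderer` is not shown in this diff, so treat this call pattern as an assumption rather than the package's API.

```python
# Hedged sketch: read the packaged summarizer template straight from the
# installed wheel and render it with Jinja2.
from importlib import resources

from jinja2 import Template

template_path = resources.files("openai_sdk_helpers") / "prompt" / "summarizer.jinja"
raw = template_path.read_text(encoding="utf-8")
# None of the three templates above declare Jinja variables, so rendering with
# no arguments returns the instruction text unchanged.
print(Template(raw).render())
```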
openai_sdk_helpers/py.typed
File without changes (new, empty marker file)
{openai_sdk_helpers-0.0.2.dist-info → openai_sdk_helpers-0.0.3.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-sdk-helpers
-Version: 0.0.2
+Version: 0.0.3
 Summary: Composable helpers for OpenAI SDK agents, prompts, and storage
 Author: openai-sdk-helpers maintainers
 License: MIT
@@ -39,6 +39,7 @@ application-specific prompts and tools to the consuming project.
   predictable inputs and outputs.
 - **Vector and web search flows** that coordinate planning, execution, and
   reporting.
+- **Reusable text agents** for summarization and translation tasks.
 
 ## Installation
 
@@ -48,6 +49,9 @@ Install the package directly from PyPI to reuse it across projects:
 pip install openai-sdk-helpers
 ```
 
+Type information ships with the published wheel via `py.typed`, so external
+projects can rely on the bundled annotations without adding custom stubs.
+
 For local development, install with editable sources and the optional dev
 dependencies:
 
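The `py.typed` marker mentioned in the hunk above is what lets type checkers consume the wheel's own annotations. A minimal sketch of what that enables, assuming the public names exported by the 0.0.2 `__init__.py` shown later in this diff remain importable:

```python
# check_types.py: mypy or pyright resolve this import and annotation from the
# installed wheel because it ships a py.typed marker; no stub package needed.
from openai_sdk_helpers import OpenAISettings

settings: OpenAISettings  # annotation only; the checker verifies the symbol
```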
@@ -74,9 +78,39 @@ report = vector_search.run_agent_sync("Explain quantum entanglement for beginner
 print(report.report)
 ```
 
-
-
-
+### Text utilities
+
+Use the built-in text helpers when you need lightweight single-step agents.
+
+```python
+from openai_sdk_helpers.agent import (
+    SummarizerAgent,
+    TranslatorAgent,
+    ValidatorAgent,
+)
+
+
+summarizer = SummarizerAgent(default_model="gpt-4o-mini")
+translator = TranslatorAgent(default_model="gpt-4o-mini")
+validator = ValidatorAgent(default_model="gpt-4o-mini")
+
+summary = summarizer.run_sync("Long-form content to condense")
+translation = translator.run_sync("Bonjour", target_language="English")
+guardrails = validator.run_sync(
+    "Share meeting notes with names removed", agent_output=summary.text
+)
+```
+
+Prompt templates are optional for the built-in text helpers. They already ship
+with defaults under `src/openai_sdk_helpers/prompt`, so you do **not** need to
+create placeholder files when installing from PyPI. Only pass a `prompt_dir`
+when you have real replacements you want to load.
+
+The vector search workflow expects real prompts for each agent (for example,
+`vector_planner.jinja`, `vector_search.jinja`, and `vector_writer.jinja`). If
+you point `prompt_dir` at a folder that does not contain those files, agent
+construction fails with a `FileNotFoundError`. Skip `prompt_dir` entirely unless
+you have working templates ready.
 
 ### Centralized OpenAI configuration
 
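Given the `FileNotFoundError` behaviour described in the README text above, it can help to validate a custom template directory before opting in. The following guard is a hypothetical sketch: the required filenames come from the README, while the helper itself is not part of the package.

```python
# Hypothetical pre-flight check before passing prompt_dir to the vector search
# workflow; omit prompt_dir entirely to fall back to the packaged defaults.
from pathlib import Path
from typing import Optional

REQUIRED_TEMPLATES = (
    "vector_planner.jinja",
    "vector_search.jinja",
    "vector_writer.jinja",
)


def usable_prompt_dir(candidate: Path) -> Optional[Path]:
    """Return candidate only when every required template file exists."""
    if all((candidate / name).is_file() for name in REQUIRED_TEMPLATES):
        return candidate
    return None
```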
@@ -129,6 +163,25 @@ pytest -q --cov=src --cov-report=term-missing --cov-fail-under=70
 - `src/openai_sdk_helpers/vector_storage`: Minimal vector store abstraction.
 - `tests/`: Unit tests covering core modules and structures.
 
+## Key modules
+
+The package centers around a handful of cohesive building blocks:
+
+- `openai_sdk_helpers.agent.project_manager.ProjectManager` coordinates prompt
+  creation, plan building, task execution, and summarization while persisting
+  intermediate artifacts to disk.
+- `openai_sdk_helpers.agent.vector_search.VectorSearch` bundles the planners,
+  executors, and summarizers required to run a multi-turn vector search flow
+  from a single entry point.
+- `openai_sdk_helpers.agent.summarizer.SummarizerAgent`,
+  `agent.translator.TranslatorAgent`, and `agent.validator.ValidatorAgent`
+  expose streamlined text-processing utilities that reuse shared prompt
+  templates.
+- `openai_sdk_helpers.response` contains the response runners and helpers used
+  to normalize outputs from agents, including streaming responses.
+- `openai_sdk_helpers.utils` holds JSON serialization helpers, logging
+  utilities, and common validation helpers used across modules.
+
 ## Contributing
 
 Contributions are welcome! Please accompany functional changes with relevant
openai_sdk_helpers-0.0.3.dist-info/RECORD
@@ -0,0 +1,8 @@
+openai_sdk_helpers/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+openai_sdk_helpers/prompt/summarizer.jinja,sha256=jliSetWDISbql1EkWi1RB8-L_BXUg8JMkRRsPRHuzbY,309
+openai_sdk_helpers/prompt/translator.jinja,sha256=SZhW8ipEzM-9IA4wyS_r2wIMTAclWrilmk1s46njoL0,291
+openai_sdk_helpers/prompt/validator.jinja,sha256=6t8q_IdxFd3mVBGX6SFKNOert1Wo3YpTOji2SNEbbtE,547
+openai_sdk_helpers-0.0.3.dist-info/METADATA,sha256=70bhIgEVzCZFLn7y_wFZinSD82oOXl0MNGqKbgkCXZY,6338
+openai_sdk_helpers-0.0.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+openai_sdk_helpers-0.0.3.dist-info/licenses/LICENSE,sha256=CUhc1NrE50bs45tcXF7OcTQBKEvkUuLqeOHgrWQ5jaA,1067
+openai_sdk_helpers-0.0.3.dist-info/RECORD,,
openai_sdk_helpers/__init__.py DELETED
@@ -1,34 +0,0 @@
-"""Shared AI helpers and base structures."""
-
-from __future__ import annotations
-
-__version__ = "0.0.1"
-
-from .structure import (
-    BaseStructure,
-    SchemaOptions,
-    spec_field,
-    assistant_tool_definition,
-    assistant_format,
-    response_tool_definition,
-    response_format,
-)
-from .prompt import PromptRenderer
-from .config import OpenAISettings
-from .vector_storage import VectorStorage, VectorStorageFileInfo, VectorStorageFileStats
-
-__all__ = [
-    "__version__",
-    "BaseStructure",
-    "SchemaOptions",
-    "spec_field",
-    "PromptRenderer",
-    "OpenAISettings",
-    "VectorStorage",
-    "VectorStorageFileInfo",
-    "VectorStorageFileStats",
-    "assistant_tool_definition",
-    "assistant_format",
-    "response_tool_definition",
-    "response_format",
-]
openai_sdk_helpers/agent/__init__.py DELETED
@@ -1,23 +0,0 @@
-"""Shared agent helpers built on the OpenAI Agents SDK."""
-
-from __future__ import annotations
-
-from .base import BaseAgent
-from .config import AgentConfig
-from .project_manager import ProjectManager
-from .runner import run, run_streamed, run_sync
-from .utils import run_coro_sync
-from .vector_search import VectorSearch
-from .web_search import WebAgentSearch
-
-__all__ = [
-    "BaseAgent",
-    "AgentConfig",
-    "ProjectManager",
-    "run",
-    "run_sync",
-    "run_streamed",
-    "run_coro_sync",
-    "VectorSearch",
-    "WebAgentSearch",
-]
openai_sdk_helpers/agent/base.py DELETED
@@ -1,432 +0,0 @@
-"""Base agent helpers built on the OpenAI Agents SDK."""
-
-from __future__ import annotations
-
-import asyncio
-import threading
-from pathlib import Path
-from typing import Any, Dict, Optional, Protocol
-
-from agents import Agent, Runner, RunResult, RunResultStreaming
-from agents.run_context import RunContextWrapper
-from agents.tool import FunctionTool
-from jinja2 import Template
-
-
-class AgentConfigLike(Protocol):
-    """Protocol describing the configuration attributes for BaseAgent."""
-
-    name: str
-    description: Optional[str]
-    model: Optional[str]
-    template_path: Optional[str]
-    input_type: Optional[Any]
-    output_type: Optional[Any]
-    tools: Optional[Any]
-    model_settings: Optional[Any]
-
-
-class BaseAgent:
-    """Factory for creating and configuring specialized agents.
-
-    Methods
-    -------
-    from_config(config, run_context_wrapper)
-        Instantiate a ``BaseAgent`` from configuration.
-    build_prompt_from_jinja(run_context_wrapper)
-        Render the agent prompt using Jinja and optional context.
-    get_prompt(run_context_wrapper, _)
-        Render the agent prompt using the provided run context.
-    get_agent()
-        Construct the configured :class:`agents.Agent` instance.
-    run(agent_input, agent_context, output_type)
-        Execute the agent asynchronously and optionally cast the result.
-    run_sync(agent_input, agent_context, output_type)
-        Execute the agent synchronously.
-    run_streamed(agent_input, agent_context, output_type)
-        Return a streaming result for the agent execution.
-    as_tool()
-        Return the agent as a callable tool.
-    """
-
-    def __init__(
-        self,
-        config: AgentConfigLike,
-        run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
-        prompt_dir: Optional[Path] = None,
-        default_model: Optional[str] = None,
-    ) -> None:
-        """Initialize the ``BaseAgent`` using a configuration object.
-
-        Parameters
-        ----------
-        config
-            Configuration describing this agent.
-        run_context_wrapper
-            Optional wrapper providing runtime context for prompt rendering.
-            Default ``None``.
-        prompt_dir
-            Optional directory holding prompt templates.
-        default_model
-            Optional fallback model identifier if the config does not supply one.
-
-        Returns
-        -------
-        None
-        """
-        name = config.name
-        description = config.description or ""
-        model = config.model or default_model
-        if not model:
-            raise ValueError("Model is required to construct the agent.")
-
-        prompt_path: Optional[Path]
-        if config.template_path:
-            prompt_path = Path(config.template_path)
-        elif prompt_dir is not None:
-            prompt_path = prompt_dir / f"{name}.jinja"
-        else:
-            prompt_path = None
-
-        if prompt_path is not None and prompt_path.exists():
-            self._template = Template(prompt_path.read_text())
-        else:
-            self._template = Template("")
-
-        self.agent_name = name
-        self.description = description
-        self.model = model
-
-        self._input_type = config.input_type
-        self._output_type = config.output_type or config.input_type
-        self._tools = config.tools
-        self._model_settings = config.model_settings
-        self._run_context_wrapper = run_context_wrapper
-
-    @classmethod
-    def from_config(
-        cls,
-        config: AgentConfigLike,
-        run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
-        prompt_dir: Optional[Path] = None,
-        default_model: Optional[str] = None,
-    ) -> "BaseAgent":
-        """Create a :class:`BaseAgent` instance from configuration.
-
-        Parameters
-        ----------
-        config
-            Configuration describing the agent.
-        run_context_wrapper
-            Optional wrapper providing runtime context. Default ``None``.
-        prompt_dir
-            Optional directory holding prompt templates.
-        default_model
-            Optional fallback model identifier.
-
-        Returns
-        -------
-        BaseAgent
-            Instantiated agent.
-        """
-        return cls(
-            config=config,
-            run_context_wrapper=run_context_wrapper,
-            prompt_dir=prompt_dir,
-            default_model=default_model,
-        )
-
-    def _build_instructions_from_jinja(self) -> str:
-        """Return the rendered instructions prompt for this agent.
-
-        Returns
-        -------
-        str
-            Prompt text rendered from the Jinja template.
-        """
-        return self.build_prompt_from_jinja(
-            run_context_wrapper=self._run_context_wrapper
-        )
-
-    def build_prompt_from_jinja(
-        self, run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None
-    ) -> str:
-        """Render the agent prompt using the provided run context.
-
-        Parameters
-        ----------
-        run_context_wrapper
-            Wrapper whose ``context`` dictionary is used to render the Jinja
-            template. Default ``None``.
-
-        Returns
-        -------
-        str
-            Rendered prompt text.
-        """
-        context = {}
-        if run_context_wrapper is not None:
-            context = run_context_wrapper.context
-
-        return self._template.render(context)
-
-    def get_prompt(
-        self, run_context_wrapper: RunContextWrapper[Dict[str, Any]], _: Agent
-    ) -> str:
-        """Render the agent prompt using the provided run context.
-
-        Parameters
-        ----------
-        run_context_wrapper
-            Wrapper around the current run context whose ``context`` dictionary
-            is used to render the Jinja template.
-        _
-            Underlying :class:`agents.Agent` instance (ignored).
-
-        Returns
-        -------
-        str
-            The rendered prompt.
-        """
-        return self.build_prompt_from_jinja(run_context_wrapper)
-
-    def get_agent(self) -> Agent:
-        """Construct and return the configured :class:`agents.Agent` instance.
-
-        Returns
-        -------
-        Agent
-            Initialized agent ready for execution.
-        """
-        agent_config: Dict[str, Any] = {
-            "name": self.agent_name,
-            "instructions": self._build_instructions_from_jinja(),
-            "model": self.model,
-        }
-        if self._output_type:
-            agent_config["output_type"] = self._output_type
-        if self._tools:
-            agent_config["tools"] = self._tools
-        if self._model_settings:
-            agent_config["model_settings"] = self._model_settings
-
-        return Agent(**agent_config)
-
-    async def run(
-        self,
-        agent_input: str,
-        agent_context: Optional[Dict[str, Any]] = None,
-        output_type: Optional[Any] = None,
-    ) -> Any:
-        """Execute the agent asynchronously.
-
-        Parameters
-        ----------
-        agent_input
-            Prompt or query for the agent.
-        agent_context
-            Optional dictionary passed to the agent. Default ``None``.
-        output_type
-            Optional type used to cast the final output. Default ``None``.
-
-        Returns
-        -------
-        Any
-            Agent result, optionally converted to ``output_type``.
-        """
-        if self._output_type is not None and output_type is None:
-            output_type = self._output_type
-        return await _run_agent(
-            agent=self.get_agent(),
-            agent_input=agent_input,
-            agent_context=agent_context,
-            output_type=output_type,
-        )
-
-    def run_sync(
-        self,
-        agent_input: str,
-        agent_context: Optional[Dict[str, Any]] = None,
-        output_type: Optional[Any] = None,
-    ) -> Any:
-        """Run the agent synchronously.
-
-        Parameters
-        ----------
-        agent_input
-            Prompt or query for the agent.
-        agent_context
-            Optional dictionary passed to the agent. Default ``None``.
-        output_type
-            Optional type used to cast the final output. Default ``None``.
-
-        Returns
-        -------
-        Any
-            Agent result, optionally converted to ``output_type``.
-        """
-        result = _run_agent_sync(
-            self.get_agent(),
-            agent_input,
-            agent_context=agent_context,
-        )
-        if self._output_type and not output_type:
-            output_type = self._output_type
-        if output_type:
-            return result.final_output_as(output_type)
-        return result
-
-    def run_streamed(
-        self,
-        agent_input: str,
-        agent_context: Optional[Dict[str, Any]] = None,
-        output_type: Optional[Any] = None,
-    ) -> RunResultStreaming:
-        """Return a streaming result for the agent execution.
-
-        Parameters
-        ----------
-        agent_input
-            Prompt or query for the agent.
-        agent_context
-            Optional dictionary passed to the agent. Default ``None``.
-        output_type
-            Optional type used to cast the final output. Default ``None``.
-
-        Returns
-        -------
-        RunResultStreaming
-            Streaming output wrapper from the agent execution.
-        """
-        result = _run_agent_streamed(
-            agent=self.get_agent(),
-            agent_input=agent_input,
-            context=agent_context,
-        )
-        if self._output_type and not output_type:
-            output_type = self._output_type
-        if output_type:
-            return result.final_output_as(output_type)
-        return result
-
-    def as_tool(self) -> FunctionTool:
-        """Return the agent as a callable tool.
-
-        Returns
-        -------
-        FunctionTool
-            Tool instance wrapping this agent.
-        """
-        agent = self.get_agent()
-        tool_obj: FunctionTool = agent.as_tool(
-            tool_name=self.agent_name, tool_description=self.description
-        )  # type: ignore
-        return tool_obj
-
-
-async def _run_agent(
-    agent: Agent,
-    agent_input: str,
-    agent_context: Optional[Dict[str, Any]] = None,
-    output_type: Optional[Any] = None,
-) -> Any:
-    """Run an ``Agent`` asynchronously.
-
-    Parameters
-    ----------
-    agent
-        Configured agent instance to execute.
-    agent_input
-        Prompt or query string for the agent.
-    agent_context
-        Optional context dictionary passed to the agent. Default ``None``.
-    output_type
-        Optional type used to cast the final output. Default ``None``.
-
-    Returns
-    -------
-    Any
-        Agent response, optionally converted to ``output_type``.
-    """
-    result = await Runner.run(agent, agent_input, context=agent_context)
-    if output_type is not None:
-        result = result.final_output_as(output_type)
-    return result
-
-
-def _run_agent_sync(
-    agent: Agent,
-    agent_input: str,
-    agent_context: Optional[Dict[str, Any]] = None,
-) -> RunResult:
-    """Run an ``Agent`` synchronously.
-
-    Parameters
-    ----------
-    agent
-        Configured agent instance to execute.
-    agent_input
-        Prompt or query string for the agent.
-    agent_context
-        Optional context dictionary passed to the agent. Default ``None``.
-
-    Returns
-    -------
-    RunResult
-        Result from the agent execution.
-    """
-    coro = Runner.run(agent, agent_input, context=agent_context)
-    try:
-        loop = asyncio.get_running_loop()
-    except RuntimeError:
-        return asyncio.run(coro)
-
-    if loop.is_running():
-        result: RunResult | None = None
-
-        def _thread_runner() -> None:
-            nonlocal result
-            result = asyncio.run(coro)
-
-        thread = threading.Thread(target=_thread_runner, daemon=True)
-        thread.start()
-        thread.join()
-        if result is None:
-            raise RuntimeError("Agent execution did not return a result.")
-        return result
-
-    return loop.run_until_complete(coro)
-
-
-def _run_agent_streamed(
-    agent: Agent,
-    agent_input: str,
-    context: Optional[Dict[str, Any]] = None,
-) -> RunResultStreaming:
-    """Run an ``Agent`` synchronously and return a streaming result.
-
-    Parameters
-    ----------
-    agent
-        Configured agent to execute.
-    agent_input
-        Prompt or query string for the agent.
-    context
-        Optional context dictionary passed to the agent. Default ``None``.
-
-    Returns
-    -------
-    RunResultStreaming
-        Instance for streaming outputs.
-    """
-    result = Runner.run_streamed(agent, agent_input, context=context)
-    return result
-
-
-__all__ = [
-    "AgentConfigLike",
-    "BaseAgent",
-    "_run_agent",
-    "_run_agent_sync",
-    "_run_agent_streamed",
-]
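A detail worth calling out in the deleted `_run_agent_sync` helper above is how it copes with an event loop that is already running (for example inside a notebook): it executes the coroutine on a short-lived worker thread that owns its own loop. Below is a self-contained sketch of the same pattern without the agents SDK dependency; names such as `run_coro_blocking` are illustrative, not part of the package.

```python
import asyncio
import threading
from typing import Any, Coroutine


def run_coro_blocking(coro: Coroutine[Any, Any, Any]) -> Any:
    """Run a coroutine to completion whether or not a loop is already running."""
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No running loop in this thread, so asyncio.run is safe to use.
        return asyncio.run(coro)

    if loop.is_running():
        # Mirror the deleted helper: hand the coroutine to a worker thread
        # with its own event loop, then block until it finishes.
        holder: dict[str, Any] = {}

        def _worker() -> None:
            holder["result"] = asyncio.run(coro)

        thread = threading.Thread(target=_worker, daemon=True)
        thread.start()
        thread.join()
        return holder["result"]

    return loop.run_until_complete(coro)
```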
openai_sdk_helpers/agent/config.py DELETED
@@ -1,66 +0,0 @@
-"""Configuration helpers for ``BaseAgent``."""
-
-from __future__ import annotations
-
-from typing import Any, List, Optional, Type
-
-from agents.model_settings import ModelSettings
-from pydantic import BaseModel, ConfigDict, Field
-
-from ..structure import BaseStructure
-
-
-class AgentConfig(BaseStructure):
-    """Configuration required to build a :class:`BaseAgent`.
-
-    Methods
-    -------
-    print()
-        Return a human readable representation of the configuration.
-    """
-
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    name: str = Field(title="Agent Name", description="Unique name for the agent")
-    description: Optional[str] = Field(
-        default=None, title="Description", description="Short description of the agent"
-    )
-    model: Optional[str] = Field(
-        default=None, title="Model", description="Model identifier to use"
-    )
-    template_path: Optional[str] = Field(
-        default=None, title="Template Path", description="Path to the Jinja template"
-    )
-    input_type: Optional[Type[BaseModel]] = Field(
-        default=None,
-        title="Input Type",
-        description="Pydantic model describing the agent input",
-    )
-    output_type: Optional[Type[Any]] = Field(
-        default=None,
-        title="Output Type",
-        description="Type describing the agent output; commonly a Pydantic model or builtin like ``str``",
-    )
-    tools: Optional[List[Any]] = Field(
-        default=None,
-        title="Tools",
-        description="Tools available to the agent",
-    )
-    model_settings: Optional[ModelSettings] = Field(
-        default=None, title="Model Settings", description="Additional model settings"
-    )
-
-    def print(self) -> str:
-        """Return a human readable representation.
-
-        Returns
-        -------
-        str
-            The agent's name.
-        """
-        return self.name
-
-
-__all__ = ["AgentConfig"]
-
-AgentConfig.model_rebuild()
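Taken together, the removed `AgentConfig` and `BaseAgent` classes appear to have been wired up roughly as in the sketch below. It is assembled from the 0.0.2 sources shown above; the model name, template path, and prompt text are placeholder values, and running it would require the `agents` SDK plus OpenAI credentials.

```python
from openai_sdk_helpers.agent import AgentConfig, BaseAgent

# Describe the agent declaratively, then let BaseAgent render its Jinja prompt
# and build the underlying agents.Agent instance.
config = AgentConfig(
    name="summarizer",
    description="Condenses long-form text into bullet points",
    template_path="prompts/summarizer.jinja",  # placeholder path
)
agent = BaseAgent.from_config(config, default_model="gpt-4o-mini")

# run_sync returns the raw RunResult when no output_type is configured.
result = agent.run_sync("Long-form content to condense")
print(result.final_output)
```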