openai-sdk-helpers 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +34 -0
- openai_sdk_helpers/agent/__init__.py +23 -0
- openai_sdk_helpers/agent/base.py +432 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +416 -0
- openai_sdk_helpers/agent/runner.py +117 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/vector_search.py +418 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +141 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +17 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/response/__init__.py +16 -0
- openai_sdk_helpers/response/base.py +477 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +42 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/structure/__init__.py +57 -0
- openai_sdk_helpers/structure/base.py +591 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +48 -0
- openai_sdk_helpers/structure/plan/plan.py +104 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +148 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/vector_search.py +82 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +13 -0
- openai_sdk_helpers/utils/core.py +208 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +501 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- openai_sdk_helpers-0.0.2.dist-info/METADATA +137 -0
- openai_sdk_helpers-0.0.2.dist-info/RECORD +40 -0
- openai_sdk_helpers-0.0.2.dist-info/WHEEL +4 -0
- openai_sdk_helpers-0.0.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""Shared AI helpers and base structures."""

from __future__ import annotations

# Keep in sync with the distribution metadata: this wheel is released as
# 0.0.2, but the module previously reported "0.0.1".
__version__ = "0.0.2"

from .structure import (
    BaseStructure,
    SchemaOptions,
    spec_field,
    assistant_tool_definition,
    assistant_format,
    response_tool_definition,
    response_format,
)
from .prompt import PromptRenderer
from .config import OpenAISettings
from .vector_storage import VectorStorage, VectorStorageFileInfo, VectorStorageFileStats

# Public API of the package root.
__all__ = [
    "__version__",
    "BaseStructure",
    "SchemaOptions",
    "spec_field",
    "PromptRenderer",
    "OpenAISettings",
    "VectorStorage",
    "VectorStorageFileInfo",
    "VectorStorageFileStats",
    "assistant_tool_definition",
    "assistant_format",
    "response_tool_definition",
    "response_format",
]
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
"""Shared agent helpers built on the OpenAI Agents SDK."""

from __future__ import annotations

# Re-export the agent toolkit so consumers can import everything from
# ``openai_sdk_helpers.agent`` directly.
from .base import BaseAgent
from .config import AgentConfig
from .project_manager import ProjectManager
from .runner import run, run_streamed, run_sync
from .utils import run_coro_sync
from .vector_search import VectorSearch
from .web_search import WebAgentSearch

# Public API of the agent subpackage.
__all__ = [
    "BaseAgent",
    "AgentConfig",
    "ProjectManager",
    "run",
    "run_sync",
    "run_streamed",
    "run_coro_sync",
    "VectorSearch",
    "WebAgentSearch",
]
|
|
@@ -0,0 +1,432 @@
|
|
|
1
|
+
"""Base agent helpers built on the OpenAI Agents SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import threading
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Dict, Optional, Protocol
|
|
9
|
+
|
|
10
|
+
from agents import Agent, Runner, RunResult, RunResultStreaming
|
|
11
|
+
from agents.run_context import RunContextWrapper
|
|
12
|
+
from agents.tool import FunctionTool
|
|
13
|
+
from jinja2 import Template
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class AgentConfigLike(Protocol):
    """Protocol describing the configuration attributes for BaseAgent.

    Any object exposing these attributes (e.g. ``AgentConfig``) can be
    passed to :class:`BaseAgent`; the protocol is structural, so no
    inheritance is required.
    """

    # Unique agent name; also used to locate the ``<name>.jinja`` prompt
    # template when no explicit ``template_path`` is given.
    name: str
    # Optional short description; used as the tool description in ``as_tool``.
    description: Optional[str]
    # Model identifier; may be None when the caller supplies ``default_model``.
    model: Optional[str]
    # Optional explicit path to the Jinja prompt template.
    template_path: Optional[str]
    # Optional type describing the agent input.
    input_type: Optional[Any]
    # Optional type used to cast the agent's final output; falls back to
    # ``input_type`` inside BaseAgent when unset.
    output_type: Optional[Any]
    # Optional collection of tools made available to the agent.
    tools: Optional[Any]
    # Optional settings forwarded to ``agents.Agent`` as ``model_settings``.
    model_settings: Optional[Any]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class BaseAgent:
    """Factory for creating and configuring specialized agents.

    Methods
    -------
    from_config(config, run_context_wrapper)
        Instantiate a ``BaseAgent`` from configuration.
    build_prompt_from_jinja(run_context_wrapper)
        Render the agent prompt using Jinja and optional context.
    get_prompt(run_context_wrapper, _)
        Render the agent prompt using the provided run context.
    get_agent()
        Construct the configured :class:`agents.Agent` instance.
    run(agent_input, agent_context, output_type)
        Execute the agent asynchronously and optionally cast the result.
    run_sync(agent_input, agent_context, output_type)
        Execute the agent synchronously.
    run_streamed(agent_input, agent_context, output_type)
        Return a streaming result for the agent execution.
    as_tool()
        Return the agent as a callable tool.
    """

    def __init__(
        self,
        config: AgentConfigLike,
        run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
    ) -> None:
        """Initialize the ``BaseAgent`` using a configuration object.

        Parameters
        ----------
        config
            Configuration describing this agent.
        run_context_wrapper
            Optional wrapper providing runtime context for prompt rendering.
            Default ``None``.
        prompt_dir
            Optional directory holding prompt templates.
        default_model
            Optional fallback model identifier if the config does not supply one.

        Raises
        ------
        ValueError
            If neither ``config.model`` nor ``default_model`` is provided.

        Returns
        -------
        None
        """
        name = config.name
        description = config.description or ""
        # Config model wins; fall back to the caller-supplied default.
        model = config.model or default_model
        if not model:
            raise ValueError("Model is required to construct the agent.")

        # Resolve the prompt template: explicit path > ``<name>.jinja`` in
        # prompt_dir > no template.
        prompt_path: Optional[Path]
        if config.template_path:
            prompt_path = Path(config.template_path)
        elif prompt_dir is not None:
            prompt_path = prompt_dir / f"{name}.jinja"
        else:
            prompt_path = None

        # A missing template is not an error: rendering then yields "".
        if prompt_path is not None and prompt_path.exists():
            self._template = Template(prompt_path.read_text())
        else:
            self._template = Template("")

        self.agent_name = name
        self.description = description
        self.model = model

        self._input_type = config.input_type
        # Fall back to the input type when no explicit output type is set.
        self._output_type = config.output_type or config.input_type
        self._tools = config.tools
        self._model_settings = config.model_settings
        self._run_context_wrapper = run_context_wrapper

    @classmethod
    def from_config(
        cls,
        config: AgentConfigLike,
        run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
    ) -> "BaseAgent":
        """Create a :class:`BaseAgent` instance from configuration.

        Parameters
        ----------
        config
            Configuration describing the agent.
        run_context_wrapper
            Optional wrapper providing runtime context. Default ``None``.
        prompt_dir
            Optional directory holding prompt templates.
        default_model
            Optional fallback model identifier.

        Returns
        -------
        BaseAgent
            Instantiated agent.
        """
        return cls(
            config=config,
            run_context_wrapper=run_context_wrapper,
            prompt_dir=prompt_dir,
            default_model=default_model,
        )

    def _build_instructions_from_jinja(self) -> str:
        """Return the rendered instructions prompt for this agent.

        Uses the run context wrapper captured at construction time.

        Returns
        -------
        str
            Prompt text rendered from the Jinja template.
        """
        return self.build_prompt_from_jinja(
            run_context_wrapper=self._run_context_wrapper
        )

    def build_prompt_from_jinja(
        self, run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None
    ) -> str:
        """Render the agent prompt using the provided run context.

        Parameters
        ----------
        run_context_wrapper
            Wrapper whose ``context`` dictionary is used to render the Jinja
            template. Default ``None``.

        Returns
        -------
        str
            Rendered prompt text.
        """
        context = {}
        if run_context_wrapper is not None:
            context = run_context_wrapper.context

        return self._template.render(context)

    def get_prompt(
        self, run_context_wrapper: RunContextWrapper[Dict[str, Any]], _: Agent
    ) -> str:
        """Render the agent prompt using the provided run context.

        The ``(run_context_wrapper, agent)`` signature matches the callable
        form the Agents SDK expects for dynamic instructions.

        Parameters
        ----------
        run_context_wrapper
            Wrapper around the current run context whose ``context`` dictionary
            is used to render the Jinja template.
        _
            Underlying :class:`agents.Agent` instance (ignored).

        Returns
        -------
        str
            The rendered prompt.
        """
        return self.build_prompt_from_jinja(run_context_wrapper)

    def get_agent(self) -> Agent:
        """Construct and return the configured :class:`agents.Agent` instance.

        Returns
        -------
        Agent
            Initialized agent ready for execution.
        """
        agent_config: Dict[str, Any] = {
            "name": self.agent_name,
            "instructions": self._build_instructions_from_jinja(),
            "model": self.model,
        }
        # Only pass optional keys when set so Agent's own defaults apply.
        if self._output_type:
            agent_config["output_type"] = self._output_type
        if self._tools:
            agent_config["tools"] = self._tools
        if self._model_settings:
            agent_config["model_settings"] = self._model_settings

        return Agent(**agent_config)

    async def run(
        self,
        agent_input: str,
        agent_context: Optional[Dict[str, Any]] = None,
        output_type: Optional[Any] = None,
    ) -> Any:
        """Execute the agent asynchronously.

        Parameters
        ----------
        agent_input
            Prompt or query for the agent.
        agent_context
            Optional dictionary passed to the agent. Default ``None``.
        output_type
            Optional type used to cast the final output. Default ``None``.

        Returns
        -------
        Any
            Agent result, optionally converted to ``output_type``.
        """
        # Explicit output_type argument takes precedence over the configured one.
        if self._output_type is not None and output_type is None:
            output_type = self._output_type
        return await _run_agent(
            agent=self.get_agent(),
            agent_input=agent_input,
            agent_context=agent_context,
            output_type=output_type,
        )

    def run_sync(
        self,
        agent_input: str,
        agent_context: Optional[Dict[str, Any]] = None,
        output_type: Optional[Any] = None,
    ) -> Any:
        """Run the agent synchronously.

        Parameters
        ----------
        agent_input
            Prompt or query for the agent.
        agent_context
            Optional dictionary passed to the agent. Default ``None``.
        output_type
            Optional type used to cast the final output. Default ``None``.

        Returns
        -------
        Any
            Agent result, optionally converted to ``output_type``.
        """
        result = _run_agent_sync(
            self.get_agent(),
            agent_input,
            agent_context=agent_context,
        )
        if self._output_type and not output_type:
            output_type = self._output_type
        if output_type:
            return result.final_output_as(output_type)
        return result

    def run_streamed(
        self,
        agent_input: str,
        agent_context: Optional[Dict[str, Any]] = None,
        output_type: Optional[Any] = None,
    ) -> RunResultStreaming:
        """Return a streaming result for the agent execution.

        Parameters
        ----------
        agent_input
            Prompt or query for the agent.
        agent_context
            Optional dictionary passed to the agent. Default ``None``.
        output_type
            Optional type used to cast the final output. Default ``None``.

        Returns
        -------
        RunResultStreaming
            Streaming output wrapper from the agent execution.
        """
        result = _run_agent_streamed(
            agent=self.get_agent(),
            agent_input=agent_input,
            context=agent_context,
        )
        if self._output_type and not output_type:
            output_type = self._output_type
        # NOTE(review): ``final_output_as`` is invoked before the stream has
        # been consumed, and the cast value is returned despite the
        # ``RunResultStreaming`` annotation — confirm against the Agents SDK
        # that this is intended (callers expecting a stream may break).
        if output_type:
            return result.final_output_as(output_type)
        return result

    def as_tool(self) -> FunctionTool:
        """Return the agent as a callable tool.

        Returns
        -------
        FunctionTool
            Tool instance wrapping this agent.
        """
        agent = self.get_agent()
        tool_obj: FunctionTool = agent.as_tool(
            tool_name=self.agent_name, tool_description=self.description
        )  # type: ignore
        return tool_obj
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
async def _run_agent(
    agent: Agent,
    agent_input: str,
    agent_context: Optional[Dict[str, Any]] = None,
    output_type: Optional[Any] = None,
) -> Any:
    """Run an ``Agent`` asynchronously.

    Parameters
    ----------
    agent
        Configured agent instance to execute.
    agent_input
        Prompt or query string for the agent.
    agent_context
        Optional context dictionary passed to the agent. Default ``None``.
    output_type
        Optional type used to cast the final output. Default ``None``.

    Returns
    -------
    Any
        Agent response, optionally converted to ``output_type``.
    """
    run_result = await Runner.run(agent, agent_input, context=agent_context)
    # Return the raw RunResult unless the caller asked for a typed output.
    if output_type is None:
        return run_result
    return run_result.final_output_as(output_type)
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
def _run_agent_sync(
    agent: Agent,
    agent_input: str,
    agent_context: Optional[Dict[str, Any]] = None,
) -> RunResult:
    """Run an ``Agent`` synchronously.

    If no event loop is running in the current thread, the coroutine is
    driven directly with :func:`asyncio.run`. If a loop *is* already running
    (e.g. inside a notebook or an async framework), the coroutine is executed
    on a fresh event loop in a worker thread, because a running loop cannot
    be re-entered.

    Parameters
    ----------
    agent
        Configured agent instance to execute.
    agent_input
        Prompt or query string for the agent.
    agent_context
        Optional context dictionary passed to the agent. Default ``None``.

    Returns
    -------
    RunResult
        Result from the agent execution.

    Raises
    ------
    RuntimeError
        If the worker thread finishes without producing a result.
    """
    coro = Runner.run(agent, agent_input, context=agent_context)
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No running loop in this thread: safe to drive the coroutine here.
        return asyncio.run(coro)

    # ``get_running_loop`` only succeeds when a loop is actively running, so
    # the original ``loop.run_until_complete`` fallback was unreachable dead
    # code; the thread hand-off below is the only valid path here.
    result: RunResult | None = None
    error: BaseException | None = None

    def _thread_runner() -> None:
        # Capture both the result and any failure so the exception is
        # re-raised in the caller's thread instead of being swallowed.
        nonlocal result, error
        try:
            result = asyncio.run(coro)
        except BaseException as exc:
            error = exc

    thread = threading.Thread(target=_thread_runner, daemon=True)
    thread.start()
    thread.join()
    if error is not None:
        raise error
    if result is None:
        raise RuntimeError("Agent execution did not return a result.")
    return result
|
|
399
|
+
|
|
400
|
+
|
|
401
|
+
def _run_agent_streamed(
    agent: Agent,
    agent_input: str,
    context: Optional[Dict[str, Any]] = None,
) -> RunResultStreaming:
    """Run an ``Agent`` synchronously and return a streaming result.

    Parameters
    ----------
    agent
        Configured agent to execute.
    agent_input
        Prompt or query string for the agent.
    context
        Optional context dictionary passed to the agent. Default ``None``.

    Returns
    -------
    RunResultStreaming
        Instance for streaming outputs.
    """
    # Delegate straight to the SDK; the caller consumes the stream.
    return Runner.run_streamed(agent, agent_input, context=context)
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
# NOTE(review): underscore-prefixed helpers are exported here — presumably so
# sibling modules can reuse them; confirm this is intentional, since exporting
# private names via __all__ is unconventional.
__all__ = [
    "AgentConfigLike",
    "BaseAgent",
    "_run_agent",
    "_run_agent_sync",
    "_run_agent_streamed",
]
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
"""Configuration helpers for ``BaseAgent``."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, List, Optional, Type
|
|
6
|
+
|
|
7
|
+
from agents.model_settings import ModelSettings
|
|
8
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
9
|
+
|
|
10
|
+
from ..structure import BaseStructure
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class AgentConfig(BaseStructure):
    """Configuration required to build a :class:`BaseAgent`.

    Methods
    -------
    print()
        Return a human readable representation of the configuration.
    """

    # Allow non-pydantic field types such as ``ModelSettings``.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Unique name for the agent; also used by BaseAgent to locate templates.
    name: str = Field(title="Agent Name", description="Unique name for the agent")
    description: Optional[str] = Field(
        default=None, title="Description", description="Short description of the agent"
    )
    model: Optional[str] = Field(
        default=None, title="Model", description="Model identifier to use"
    )
    template_path: Optional[str] = Field(
        default=None, title="Template Path", description="Path to the Jinja template"
    )
    input_type: Optional[Type[BaseModel]] = Field(
        default=None,
        title="Input Type",
        description="Pydantic model describing the agent input",
    )
    output_type: Optional[Type[Any]] = Field(
        default=None,
        title="Output Type",
        description="Type describing the agent output; commonly a Pydantic model or builtin like ``str``",
    )
    tools: Optional[List[Any]] = Field(
        default=None,
        title="Tools",
        description="Tools available to the agent",
    )
    model_settings: Optional[ModelSettings] = Field(
        default=None, title="Model Settings", description="Additional model settings"
    )

    # NOTE(review): ``print`` shadows the builtin; kept as-is because renaming
    # would break the public interface.
    def print(self) -> str:
        """Return a human readable representation.

        Returns
        -------
        str
            The agent's name.
        """
        return self.name
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
__all__ = ["AgentConfig"]

# Resolve forward references deferred by ``from __future__ import annotations``
# so the pydantic model is fully built at import time.
AgentConfig.model_rebuild()
|