openai-sdk-helpers 0.0.5-py3-none-any.whl → 0.0.6-py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- openai_sdk_helpers/__init__.py +62 -0
- openai_sdk_helpers/agent/__init__.py +31 -0
- openai_sdk_helpers/agent/base.py +330 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +511 -0
- openai_sdk_helpers/agent/prompt_utils.py +9 -0
- openai_sdk_helpers/agent/runner.py +215 -0
- openai_sdk_helpers/agent/summarizer.py +85 -0
- openai_sdk_helpers/agent/translator.py +139 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/validation.py +97 -0
- openai_sdk_helpers/agent/vector_search.py +462 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +153 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +29 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/py.typed +0 -0
- openai_sdk_helpers/response/__init__.py +18 -0
- openai_sdk_helpers/response/base.py +501 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +104 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/structure/__init__.py +43 -0
- openai_sdk_helpers/structure/agent_blueprint.py +224 -0
- openai_sdk_helpers/structure/base.py +713 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +64 -0
- openai_sdk_helpers/structure/plan/plan.py +253 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +132 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/validation.py +47 -0
- openai_sdk_helpers/structure/vector_search.py +86 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +13 -0
- openai_sdk_helpers/utils/core.py +208 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +501 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/METADATA +1 -1
- openai_sdk_helpers-0.0.6.dist-info/RECORD +50 -0
- openai_sdk_helpers-0.0.5.dist-info/RECORD +0 -7
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.6.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/__init__.py
@@ -0,0 +1,62 @@
"""Shared AI helpers and base structures."""

from __future__ import annotations

from .structure import *
from .prompt import PromptRenderer
from .config import OpenAISettings
from .vector_storage import *
from .agent import (
    AgentBase,
    AgentConfig,
    AgentEnum,
    ProjectManager,
    SummarizerAgent,
    TranslatorAgent,
    ValidatorAgent,
    VectorSearch,
    WebAgentSearch,
)
from .response import (
    ResponseBase,
    ResponseMessage,
    ResponseMessages,
    ResponseToolCall,
)

__all__ = [
    "BaseStructure",
    "SchemaOptions",
    "spec_field",
    "PromptRenderer",
    "OpenAISettings",
    "VectorStorage",
    "VectorStorageFileInfo",
    "VectorStorageFileStats",
    "assistant_tool_definition",
    "assistant_format",
    "response_tool_definition",
    "response_format",
    "SummaryStructure",
    "PromptStructure",
    "AgentBlueprint",
    "TaskStructure",
    "PlanStructure",
    "AgentEnum",
    "AgentBase",
    "AgentConfig",
    "ProjectManager",
    "SummarizerAgent",
    "TranslatorAgent",
    "ValidatorAgent",
    "VectorSearch",
    "WebAgentSearch",
    "ExtendedSummaryStructure",
    "WebSearchStructure",
    "VectorSearchStructure",
    "ValidationResultStructure",
    "ResponseBase",
    "ResponseMessage",
    "ResponseMessages",
    "ResponseToolCall",
]
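Everything listed in ``__all__`` above is re-exported from the package root. As orientation only (not part of the published diff), a minimal sketch of what importing that surface looks like, assuming the wheel and its dependencies (openai-agents, pydantic, jinja2) are installed:

```python
# Illustrative sketch: these names mirror the re-exports declared in
# openai_sdk_helpers/__init__.py above; nothing new is defined here.
from openai_sdk_helpers import (
    AgentBase,
    AgentConfig,
    OpenAISettings,
    PromptRenderer,
    ResponseMessages,
    VectorStorage,
)
```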
openai_sdk_helpers/agent/__init__.py
@@ -0,0 +1,31 @@
"""Shared agent helpers built on the OpenAI Agents SDK."""

from __future__ import annotations

from .base import AgentBase
from .config import AgentConfig
from ..structure.plan.enum import AgentEnum
from .project_manager import ProjectManager
from .runner import run_sync, run_async, run_streamed
from .summarizer import SummarizerAgent
from .translator import TranslatorAgent
from .validation import ValidatorAgent
from .utils import run_coroutine_agent_sync
from .vector_search import VectorSearch
from .web_search import WebAgentSearch

__all__ = [
    "AgentBase",
    "AgentConfig",
    "AgentEnum",
    "ProjectManager",
    "run_sync",
    "run_async",
    "run_streamed",
    "run_coroutine_agent_sync",
    "SummarizerAgent",
    "TranslatorAgent",
    "ValidatorAgent",
    "VectorSearch",
    "WebAgentSearch",
]
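Besides the agent classes, this module re-exports the module-level runner helpers ``run_sync``, ``run_async``, and ``run_streamed`` from ``agent/runner.py``, which is not shown in this excerpt. The keyword calling convention in the sketch below is inferred from how ``AgentBase`` invokes these helpers in ``agent/base.py`` later in this diff, so treat it as an assumption rather than documented API:

```python
# Assumed signature: run_sync(agent=..., input=..., context=..., output_type=...),
# inferred from the calls inside AgentBase in agent/base.py; agent/runner.py
# itself is not part of this excerpt.
from agents import Agent  # openai-agents SDK

from openai_sdk_helpers.agent import run_sync

echo = Agent(
    name="echo",
    instructions="Repeat the user's message verbatim.",
    model="gpt-4o-mini",  # placeholder model identifier
)
result = run_sync(agent=echo, input="hello", context=None, output_type=None)
```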
openai_sdk_helpers/agent/base.py
@@ -0,0 +1,330 @@
"""Base agent helpers built on the OpenAI Agents SDK."""

from __future__ import annotations

import asyncio
import threading
from pathlib import Path
from typing import Any, Dict, Optional, Protocol

from agents import Agent, Runner, RunResult, RunResultStreaming
from agents.run_context import RunContextWrapper
from agents.tool import FunctionTool
from jinja2 import Template
from .runner import run_sync, run_streamed, run_async


class AgentConfigLike(Protocol):
    """Protocol describing the configuration attributes for AgentBase."""

    name: str
    description: Optional[str]
    model: Optional[str]
    template_path: Optional[str]
    input_type: Optional[Any]
    output_type: Optional[Any]
    tools: Optional[Any]
    model_settings: Optional[Any]


class AgentBase:
    """Factory for creating and configuring specialized agents.

    Methods
    -------
    from_config(config, run_context_wrapper)
        Instantiate a ``AgentBase`` from configuration.
    build_prompt_from_jinja(run_context_wrapper)
        Render the agent prompt using Jinja and optional context.
    get_prompt(run_context_wrapper, _)
        Render the agent prompt using the provided run context.
    get_agent()
        Construct the configured :class:`agents.Agent` instance.
    run(input, context, output_type)
        Execute the agent asynchronously (alias of ``run_async``).
    run_async(input, context, output_type)
        Execute the agent asynchronously and optionally cast the result.
    run_sync(input, context, output_type)
        Execute the agent synchronously.
    run_streamed(input, context, output_type)
        Return a streaming result for the agent execution.
    as_tool()
        Return the agent as a callable tool.
    """

    def __init__(
        self,
        config: AgentConfigLike,
        run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
    ) -> None:
        """Initialize the ``AgentBase`` using a configuration object.

        Parameters
        ----------
        config
            Configuration describing this agent.
        run_context_wrapper
            Optional wrapper providing runtime context for prompt rendering.
            Default ``None``.
        prompt_dir
            Optional directory holding prompt templates.
        default_model
            Optional fallback model identifier if the config does not supply one.

        Returns
        -------
        None
        """
        name = config.name
        description = config.description or ""
        model = config.model or default_model
        if not model:
            raise ValueError("Model is required to construct the agent.")

        prompt_path: Optional[Path]
        if config.template_path:
            prompt_path = Path(config.template_path)
        elif prompt_dir is not None:
            prompt_path = prompt_dir / f"{name}.jinja"
        else:
            prompt_path = None

        if prompt_path is None:
            self._template = Template("")
        elif prompt_path.exists():
            self._template = Template(prompt_path.read_text())
        else:
            raise FileNotFoundError(
                f"Prompt template for agent '{name}' not found at {prompt_path}."
            )

        self.agent_name = name
        self.description = description
        self.model = model

        self._input_type = config.input_type
        self._output_type = config.output_type or config.input_type
        self._tools = config.tools
        self._model_settings = config.model_settings
        self._run_context_wrapper = run_context_wrapper

    @classmethod
    def from_config(
        cls,
        config: AgentConfigLike,
        run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
    ) -> "AgentBase":
        """Create a :class:`AgentBase` instance from configuration.

        Parameters
        ----------
        config
            Configuration describing the agent.
        run_context_wrapper
            Optional wrapper providing runtime context. Default ``None``.
        prompt_dir
            Optional directory holding prompt templates.
        default_model
            Optional fallback model identifier.

        Returns
        -------
        AgentBase
            Instantiated agent.
        """
        return cls(
            config=config,
            run_context_wrapper=run_context_wrapper,
            prompt_dir=prompt_dir,
            default_model=default_model,
        )

    def _build_prompt_from_jinja(self) -> str:
        """Return the rendered instructions prompt for this agent.

        Returns
        -------
        str
            Prompt text rendered from the Jinja template.
        """
        return self.build_prompt_from_jinja(
            run_context_wrapper=self._run_context_wrapper
        )

    def build_prompt_from_jinja(
        self, run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None
    ) -> str:
        """Render the agent prompt using the provided run context.

        Parameters
        ----------
        run_context_wrapper
            Wrapper whose ``context`` dictionary is used to render the Jinja
            template. Default ``None``.

        Returns
        -------
        str
            Rendered prompt text.
        """
        context = {}
        if run_context_wrapper is not None:
            context = run_context_wrapper.context

        return self._template.render(context)

    def get_prompt(
        self, run_context_wrapper: RunContextWrapper[Dict[str, Any]], _: Agent
    ) -> str:
        """Render the agent prompt using the provided run context.

        Parameters
        ----------
        run_context_wrapper
            Wrapper around the current run context whose ``context`` dictionary
            is used to render the Jinja template.
        _
            Underlying :class:`agents.Agent` instance (ignored).

        Returns
        -------
        str
            The rendered prompt.
        """
        return self.build_prompt_from_jinja(run_context_wrapper)

    def get_agent(self) -> Agent:
        """Construct and return the configured :class:`agents.Agent` instance.

        Returns
        -------
        Agent
            Initialized agent ready for execution.
        """
        agent_config: Dict[str, Any] = {
            "name": self.agent_name,
            "instructions": self._build_prompt_from_jinja(),
            "model": self.model,
        }
        if self._output_type:
            agent_config["output_type"] = self._output_type
        if self._tools:
            agent_config["tools"] = self._tools
        if self._model_settings:
            agent_config["model_settings"] = self._model_settings

        return Agent(**agent_config)

    async def run_async(
        self,
        input: str,
        context: Optional[Dict[str, Any]] = None,
        output_type: Optional[Any] = None,
    ) -> Any:
        """Execute the agent asynchronously.

        Parameters
        ----------
        input
            Prompt or query for the agent.
        context
            Optional dictionary passed to the agent. Default ``None``.
        output_type
            Optional type used to cast the final output. Default ``None``.

        Returns
        -------
        Any
            Agent result, optionally converted to ``output_type``.
        """
        if self._output_type is not None and output_type is None:
            output_type = self._output_type
        return await run_async(
            agent=self.get_agent(),
            input=input,
            context=context,
            output_type=output_type,
        )

    def run_sync(
        self,
        input: str,
        context: Optional[Dict[str, Any]] = None,
        output_type: Optional[Any] = None,
    ) -> Any:
        """Run the agent synchronously.

        Parameters
        ----------
        input
            Prompt or query for the agent.
        context
            Optional dictionary passed to the agent. Default ``None``.
        output_type
            Optional type used to cast the final output. Default ``None``.

        Returns
        -------
        Any
            Agent result, optionally converted to ``output_type``.
        """
        return run_sync(
            agent=self.get_agent(),
            input=input,
            context=context,
            output_type=output_type,
        )

    def run_streamed(
        self,
        input: str,
        context: Optional[Dict[str, Any]] = None,
        output_type: Optional[Any] = None,
    ) -> RunResultStreaming:
        """Return a streaming result for the agent execution.

        Parameters
        ----------
        input
            Prompt or query for the agent.
        context
            Optional dictionary passed to the agent. Default ``None``.
        output_type
            Optional type used to cast the final output. Default ``None``.

        Returns
        -------
        RunResultStreaming
            Streaming output wrapper from the agent execution.
        """
        result = run_streamed(
            agent=self.get_agent(),
            input=input,
            context=context,
        )
        if self._output_type and not output_type:
            output_type = self._output_type
        if output_type:
            return result.final_output_as(output_type)
        return result

    def as_tool(self) -> FunctionTool:
        """Return the agent as a callable tool.

        Returns
        -------
        FunctionTool
            Tool instance wrapping this agent.
        """
        agent = self.get_agent()
        tool_obj: FunctionTool = agent.as_tool(
            tool_name=self.agent_name, tool_description=self.description
        )  # type: ignore
        return tool_obj


__all__ = ["AgentConfigLike", "AgentBase"]
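For context, a hedged usage sketch of the ``AgentBase`` API shown above (not part of the published diff). It assumes the package and the ``openai-agents`` dependency are installed, that an API key is configured, and that a ``prompts/summarizer.jinja`` template exists; the agent name, prompt directory, and model identifier are placeholders:

```python
# Hedged sketch based on the AgentBase/AgentConfig code in this diff.
# Placeholders: the "summarizer" agent name, the prompts/ directory, and the
# model identifier; prompts/summarizer.jinja must exist or __init__ raises
# FileNotFoundError.
from pathlib import Path

from openai_sdk_helpers import AgentBase, AgentConfig

config = AgentConfig(
    name="summarizer",
    description="Summarize the supplied text.",
    model="gpt-4o-mini",
)

# With no template_path set, AgentBase looks for prompt_dir / "summarizer.jinja".
agent = AgentBase.from_config(config, prompt_dir=Path("prompts"))

# run_sync delegates to the module-level runner helper and returns the final
# output, cast to output_type when one is configured.
summary = agent.run_sync(input="Summarize: the 0.0.6 release adds agent helpers.")
```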
openai_sdk_helpers/agent/config.py
@@ -0,0 +1,66 @@
"""Configuration helpers for ``AgentBase``."""

from __future__ import annotations

from typing import Any, List, Optional, Type

from agents.model_settings import ModelSettings
from pydantic import BaseModel, ConfigDict, Field

from ..structure import BaseStructure


class AgentConfig(BaseStructure):
    """Configuration required to build a :class:`AgentBase`.

    Methods
    -------
    print()
        Return a human readable representation of the configuration.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    name: str = Field(title="Agent Name", description="Unique name for the agent")
    description: Optional[str] = Field(
        default=None, title="Description", description="Short description of the agent"
    )
    model: Optional[str] = Field(
        default=None, title="Model", description="Model identifier to use"
    )
    template_path: Optional[str] = Field(
        default=None, title="Template Path", description="Path to the Jinja template"
    )
    input_type: Optional[Type[BaseModel]] = Field(
        default=None,
        title="Input Type",
        description="Pydantic model describing the agent input",
    )
    output_type: Optional[Type[Any]] = Field(
        default=None,
        title="Output Type",
        description="Type describing the agent output; commonly a Pydantic model or builtin like ``str``",
    )
    tools: Optional[List[Any]] = Field(
        default=None,
        title="Tools",
        description="Tools available to the agent",
    )
    model_settings: Optional[ModelSettings] = Field(
        default=None, title="Model Settings", description="Additional model settings"
    )

    def print(self) -> str:
        """Return a human readable representation.

        Returns
        -------
        str
            The agent's name.
        """
        return self.name


__all__ = ["AgentConfig"]

AgentConfig.model_rebuild()
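Because ``arbitrary_types_allowed`` is enabled, ``AgentConfig`` can carry non-Pydantic values such as ``ModelSettings`` alongside a structured ``output_type``. A hedged sketch (not part of the published diff; the ``TicketTriage`` model, template path, and temperature value are invented for illustration):

```python
# Hedged illustration of AgentConfig from config.py above; TicketTriage, the
# template path, and the temperature value are made up for this example.
from agents.model_settings import ModelSettings
from pydantic import BaseModel

from openai_sdk_helpers import AgentConfig


class TicketTriage(BaseModel):
    category: str
    priority: int


config = AgentConfig(
    name="triage",
    description="Classify incoming support tickets.",
    model="gpt-4o-mini",
    template_path="prompts/triage.jinja",
    output_type=TicketTriage,
    model_settings=ModelSettings(temperature=0.0),
)

print(config.print())  # -> "triage"
```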