openai-sdk-helpers 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl
- openai_sdk_helpers/__init__.py +62 -0
- openai_sdk_helpers/agent/__init__.py +31 -0
- openai_sdk_helpers/agent/base.py +330 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +511 -0
- openai_sdk_helpers/agent/prompt_utils.py +9 -0
- openai_sdk_helpers/agent/runner.py +215 -0
- openai_sdk_helpers/agent/summarizer.py +85 -0
- openai_sdk_helpers/agent/translator.py +139 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/validation.py +97 -0
- openai_sdk_helpers/agent/vector_search.py +462 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +199 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +29 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/py.typed +0 -0
- openai_sdk_helpers/response/__init__.py +20 -0
- openai_sdk_helpers/response/base.py +505 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +104 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/response/vector_store.py +84 -0
- openai_sdk_helpers/structure/__init__.py +43 -0
- openai_sdk_helpers/structure/agent_blueprint.py +224 -0
- openai_sdk_helpers/structure/base.py +713 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +64 -0
- openai_sdk_helpers/structure/plan/plan.py +253 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +132 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/validation.py +47 -0
- openai_sdk_helpers/structure/vector_search.py +86 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +25 -0
- openai_sdk_helpers/utils/core.py +300 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +564 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.7.dist-info}/METADATA +6 -3
- openai_sdk_helpers-0.0.7.dist-info/RECORD +51 -0
- openai_sdk_helpers-0.0.5.dist-info/RECORD +0 -7
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.7.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.5.dist-info → openai_sdk_helpers-0.0.7.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/prompt/__init__.py
@@ -0,0 +1,77 @@
+"""Core prompt rendering utilities."""
+
+from __future__ import annotations
+
+import warnings
+from pathlib import Path
+from typing import Any, Mapping, Optional
+
+from dotenv import load_dotenv
+from jinja2 import Environment, FileSystemLoader, Template
+
+load_dotenv()
+warnings.filterwarnings("ignore")
+
+
+class PromptRenderer:
+    """Render prompts using Jinja2 templates.
+
+    The renderer loads templates from a base directory (defaulting to the
+    ``prompt`` package directory) and exposes a rendering helper for
+    injecting context values.
+
+    Methods
+    -------
+    render(template_path, context)
+        Render the template at ``template_path`` with the supplied context.
+    """
+
+    def __init__(self, base_dir: Optional[Path] = None) -> None:
+        """Initialize the renderer with a Jinja2 environment.
+
+        Parameters
+        ----------
+        base_dir : Path or None, default=None
+            Base directory containing Jinja2 templates. Defaults to the
+            ``prompt`` directory adjacent to this file when ``None``.
+
+        Returns
+        -------
+        None
+        """
+        if base_dir is None:
+            # Defaults to the directory containing this file, which also
+            # contains the builtin prompt templates.
+            self.base_dir = Path(__file__).resolve().parent
+        else:
+            self.base_dir = base_dir
+
+        self._env = Environment(
+            loader=FileSystemLoader(str(self.base_dir)),
+            autoescape=False,  # Prompts are plain text
+        )
+
+    def render(
+        self, template_path: str, context: Optional[Mapping[str, Any]] = None
+    ) -> str:
+        """Render a Jinja2 template with the given context.
+
+        Parameters
+        ----------
+        template_path : str
+            Path to the template file, relative to ``base_dir``.
+        context : Mapping[str, Any] or None, default=None
+            Context variables passed to the template.
+
+        Returns
+        -------
+        str
+            Rendered prompt as a string.
+        """
+        template_path_ = Path(self.base_dir, template_path)
+        template_path_text = template_path_.read_text()
+        template = Template(template_path_text)
+        return template.render(context or {})
+
+
+__all__ = ["PromptRenderer"]
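For orientation, a minimal usage sketch of the PromptRenderer added above; the templates directory and greeting.j2 file are hypothetical stand-ins, not part of the package:

    from pathlib import Path

    from openai_sdk_helpers.prompt import PromptRenderer

    # Hypothetical template directory containing greeting.j2, whose body
    # might read: Hello {{ name }}!
    renderer = PromptRenderer(base_dir=Path("templates"))

    # render() resolves the path relative to base_dir and fills the context.
    print(renderer.render("greeting.j2", {"name": "Ada"}))  # -> Hello Ada!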
openai_sdk_helpers/py.typed: file without changes
openai_sdk_helpers/response/__init__.py
@@ -0,0 +1,20 @@
+"""Shared response helpers for OpenAI interactions."""
+
+from __future__ import annotations
+
+from .base import ResponseBase
+from .messages import ResponseMessage, ResponseMessages
+from .runner import run_sync, run_async, run_streamed
+from .vector_store import attach_vector_store
+from .tool_call import ResponseToolCall
+
+__all__ = [
+    "ResponseBase",
+    "ResponseMessage",
+    "ResponseMessages",
+    "run_sync",
+    "run_async",
+    "run_streamed",
+    "ResponseToolCall",
+    "attach_vector_store",
+]
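These re-exports define the package's public response surface. A caller-side sketch, assuming the wheel is installed:

    from openai_sdk_helpers.response import ResponseBase, run_sync, run_streamed

    # ResponseBase is defined in base.py (next hunk); the run_* helpers
    # come from the runner module.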
openai_sdk_helpers/response/base.py
@@ -0,0 +1,505 @@
+"""Base response handling for OpenAI interactions."""
+
+from __future__ import annotations
+
+import asyncio
+import inspect
+import json
+import logging
+import threading
+import uuid
+from pathlib import Path
+from typing import (
+    Any,
+    Callable,
+    Generic,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from openai import OpenAI
+from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
+from openai.types.responses.response_input_file_param import ResponseInputFileParam
+from openai.types.responses.response_input_message_content_list_param import (
+    ResponseInputMessageContentListParam,
+)
+from openai.types.responses.response_input_param import ResponseInputItemParam
+from openai.types.responses.response_input_text_param import ResponseInputTextParam
+from openai.types.responses.response_output_message import ResponseOutputMessage
+
+from .messages import ResponseMessages
+from ..structure import BaseStructure
+from ..utils import ensure_list, log
+
+T = TypeVar("T", bound=BaseStructure)
+ToolHandler = Callable[[ResponseFunctionToolCall], Union[str, Any]]
+ProcessContent = Callable[[str], Tuple[str, List[str]]]
+
+
+class ResponseBase(Generic[T]):
+    """Manage OpenAI interactions for structured responses.
+
+    This base class handles input construction, OpenAI requests, tool calls,
+    and optional parsing into structured output models.
+
+    Methods
+    -------
+    run_async(content, attachments)
+        Generate a response asynchronously and return parsed output.
+    run_sync(content, attachments)
+        Synchronous wrapper around ``run_async``.
+    run_streamed(content, attachments)
+        Await ``run_async`` to mirror the agent API.
+    save(filepath)
+        Serialize the message history to disk.
+    close()
+        Clean up remote resources (vector stores).
+    """
+
+    def __init__(
+        self,
+        *,
+        instructions: str,
+        tools: Optional[list],
+        schema: Optional[Any],
+        output_structure: Optional[Type[T]],
+        tool_handlers: dict[str, ToolHandler],
+        process_content: Optional[ProcessContent] = None,
+        module_name: Optional[str] = None,
+        vector_storage_cls: Optional[type] = None,
+        client: Optional[OpenAI] = None,
+        model: Optional[str] = None,
+        api_key: Optional[str] = None,
+        attachments: Optional[Union[Tuple[str, str], list[Tuple[str, str]]]] = None,
+        data_path_fn: Optional[Callable[[str], Path]] = None,
+        save_path: Optional[Path | str] = None,
+    ) -> None:
+        """Initialize a response session.
+
+        Parameters
+        ----------
+        instructions : str
+            System instructions for the OpenAI response.
+        tools : list or None
+            Tool definitions for the OpenAI request.
+        schema : object or None
+            Optional response schema configuration.
+        output_structure : type[BaseStructure] or None
+            Structure type used to parse tool call outputs.
+        tool_handlers : dict[str, ToolHandler]
+            Mapping of tool names to handler callables.
+        process_content : callable, optional
+            Callback that cleans input text and extracts attachments.
+        module_name : str, optional
+            Module name used to build the data path.
+        vector_storage_cls : type, optional
+            Vector storage class used for file uploads.
+        client : OpenAI or None, default=None
+            Optional pre-initialized OpenAI client.
+        model : str or None, default=None
+            Optional OpenAI model name override.
+        api_key : str or None, default=None
+            Optional OpenAI API key override.
+        attachments : tuple or list of tuples, optional
+            File attachments in the form ``(file_path, tool_type)``.
+        data_path_fn : callable or None, default=None
+            Function that maps ``module_name`` to a base data path.
+        save_path : Path | str or None, default=None
+            Optional path to a directory or file for persisted messages.
+
+        Raises
+        ------
+        ValueError
+            If API key or model is missing.
+        RuntimeError
+            If the OpenAI client fails to initialize.
+        """
+        self._tool_handlers = tool_handlers
+        self._process_content = process_content
+        self._module_name = module_name
+        self._vector_storage_cls = vector_storage_cls
+        self._data_path_fn = data_path_fn
+        self._save_path = Path(save_path) if save_path is not None else None
+        self._instructions = instructions
+        self._tools = tools if tools is not None else []
+        self._schema = schema
+        self._output_structure = output_structure
+        self._cleanup_user_vector_storage = False
+        self._cleanup_system_vector_storage = False
+
+        if client is None:
+            if api_key is None:
+                raise ValueError("OpenAI API key is required")
+            try:
+                self._client = OpenAI(api_key=api_key)
+            except Exception as exc:
+                raise RuntimeError("Failed to initialize OpenAI client") from exc
+        else:
+            self._client = client
+
+        self._model = model
+        if not self._model:
+            raise ValueError("OpenAI model is required")
+
+        self.uuid = uuid.uuid4()
+        self.name = self.__class__.__name__.lower()
+
+        system_content: ResponseInputMessageContentListParam = [
+            ResponseInputTextParam(type="input_text", text=instructions)
+        ]
+
+        self._system_vector_storage: Optional[Any] = None
+        self._user_vector_storage: Optional[Any] = None
+
+        if attachments:
+            if self._vector_storage_cls is None:
+                raise RuntimeError("vector_storage_cls is required for attachments.")
+            self.file_objects: dict[str, List[str]] = {}
+            storage_name = f"{self.__class__.__name__.lower()}_{self.name}_system"
+            self._system_vector_storage = self._vector_storage_cls(
+                store_name=storage_name, client=self._client, model=self._model
+            )
+            self._cleanup_system_vector_storage = True
+            system_vector_storage = cast(Any, self._system_vector_storage)
+            for file_path, tool_type in attachments:
+                uploaded_file = system_vector_storage.upload_file(file_path=file_path)
+                self.file_objects.setdefault(tool_type, []).append(uploaded_file.id)
+
+            self.tool_resources = {}
+            required_tools = []
+
+            for tool_type, file_ids in self.file_objects.items():
+                required_tools.append({"type": tool_type})
+                self.tool_resources[tool_type] = {"file_ids": file_ids}
+                if tool_type == "file_search":
+                    self.tool_resources[tool_type]["vector_store_ids"] = [
+                        system_vector_storage.id
+                    ]
+
+            existing_tool_types = {tool["type"] for tool in self._tools}
+            for tool in required_tools:
+                tool_type = tool["type"]
+                if tool_type == "file_search":
+                    tool["vector_store_ids"] = [system_vector_storage.id]
+                if tool_type not in existing_tool_types:
+                    self._tools.append(tool)
+
+        self.messages = ResponseMessages()
+        self.messages.add_system_message(content=system_content)
+        if self._save_path is not None or (
+            self._data_path_fn is not None and self._module_name is not None
+        ):
+            self.save()
+
+    @property
+    def data_path(self) -> Path:
+        """Return the directory used to persist artifacts for this session.
+
+        Returns
+        -------
+        Path
+            Absolute path for persisting response artifacts.
+        """
+        if self._data_path_fn is None or self._module_name is None:
+            raise RuntimeError(
+                "data_path_fn and module_name are required to build data paths."
+            )
+        base_path = self._data_path_fn(self._module_name)
+        return base_path / self.__class__.__name__.lower() / self.name
+
+    def _build_input(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[List[str]] = None,
+    ) -> None:
+        """Build the list of input messages for the OpenAI request.
+
+        Parameters
+        ----------
+        content
+            String or list of strings to include as user messages.
+        attachments
+            Optional list of file paths to upload and attach.
+        """
+        contents = ensure_list(content)
+
+        for raw_content in contents:
+            if self._process_content is None:
+                processed_text, content_attachments = raw_content, []
+            else:
+                processed_text, content_attachments = self._process_content(raw_content)
+            input_content: List[
+                Union[ResponseInputTextParam, ResponseInputFileParam]
+            ] = [ResponseInputTextParam(type="input_text", text=processed_text)]
+
+            all_attachments = (attachments or []) + content_attachments
+
+            for file_path in all_attachments:
+                if self._user_vector_storage is None:
+                    if self._vector_storage_cls is None:
+                        raise RuntimeError(
+                            "vector_storage_cls is required for attachments."
+                        )
+                    store_name = f"{self.__class__.__name__.lower()}_{self.name}_{self.uuid}_user"
+                    self._user_vector_storage = self._vector_storage_cls(
+                        store_name=store_name,
+                        client=self._client,
+                        model=self._model,
+                    )
+                    self._cleanup_user_vector_storage = True
+                    user_vector_storage = cast(Any, self._user_vector_storage)
+                    if not any(
+                        tool.get("type") == "file_search" for tool in self._tools
+                    ):
+                        self._tools.append(
+                            {
+                                "type": "file_search",
+                                "vector_store_ids": [user_vector_storage.id],
+                            }
+                        )
+                    else:
+                        for tool in self._tools:
+                            if tool.get("type") == "file_search":
+                                if self._system_vector_storage is not None:
+                                    tool["vector_store_ids"] = [
+                                        cast(Any, self._system_vector_storage).id,
+                                        user_vector_storage.id,
+                                    ]
+                user_vector_storage = cast(Any, self._user_vector_storage)
+                uploaded_file = user_vector_storage.upload_file(file_path)
+                input_content.append(
+                    ResponseInputFileParam(type="input_file", file_id=uploaded_file.id)
+                )
+
+            message = cast(
+                ResponseInputItemParam,
+                {"role": "user", "content": input_content},
+            )
+            self.messages.add_user_message(message)
+
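As an aside, not part of the diff: _build_input routes each raw string through the optional process_content callback before wrapping it as a user message. A minimal sketch of such a callback, assuming an invented "file:" line convention for marking attachments:

    from typing import List, Tuple

    def extract_file_lines(raw: str) -> Tuple[str, List[str]]:
        # Matches the ProcessContent alias: take raw text, return the
        # cleaned text plus any attachment paths pulled out of it.
        kept: List[str] = []
        paths: List[str] = []
        for line in raw.splitlines():
            if line.startswith("file:"):
                paths.append(line.removeprefix("file:").strip())
            else:
                kept.append(line)
        return "\n".join(kept), paths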
+    async def run_async(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[Union[str, List[str]]] = None,
+    ) -> Optional[T]:
+        """Generate a response asynchronously.
+
+        Parameters
+        ----------
+        content
+            Prompt text or list of texts.
+        attachments
+            Optional file path or list of paths to upload and attach.
+
+        Returns
+        -------
+        Optional[T]
+            Parsed response object or ``None``.
+
+        Raises
+        ------
+        RuntimeError
+            If the API returns no output or a tool handler errors.
+        ValueError
+            If no handler is found for a tool invoked by the API.
+        """
+        log(f"{self.__class__.__name__}::run_response")
+        parsed_result: Optional[T] = None
+
+        self._build_input(
+            content=content,
+            attachments=(ensure_list(attachments) if attachments else None),
+        )
+
+        kwargs = {
+            "input": self.messages.to_openai_payload(),
+            "model": self._model,
+        }
+        if self._schema is not None:
+            kwargs["text"] = self._schema
+
+        if self._tools:
+            kwargs["tools"] = self._tools
+            kwargs["tool_choice"] = "auto"
+        response = self._client.responses.create(**kwargs)
+
+        if not response.output:
+            log("No output returned from OpenAI.", level=logging.ERROR)
+            raise RuntimeError("No output returned from OpenAI.")
+
+        for response_output in response.output:
+            if isinstance(response_output, ResponseFunctionToolCall):
+                log(
+                    f"Tool call detected. Executing {response_output.name}.",
+                    level=logging.INFO,
+                )
+
+                tool_name = response_output.name
+                handler = self._tool_handlers.get(tool_name)
+
+                if handler is None:
+                    log(
+                        f"No handler found for tool '{tool_name}'",
+                        level=logging.ERROR,
+                    )
+                    raise ValueError(f"No handler for tool: {tool_name}")
+
+                try:
+                    if inspect.iscoroutinefunction(handler):
+                        tool_result_json = await handler(response_output)
+                    else:
+                        tool_result_json = handler(response_output)
+                    if isinstance(tool_result_json, str):
+                        tool_result = json.loads(tool_result_json)
+                        tool_output = tool_result_json
+                    else:
+                        tool_result = tool_result_json
+                        tool_output = json.dumps(tool_result)
+                    self.messages.add_tool_message(
+                        content=response_output, output=tool_output
+                    )
+                    self.save()
+                except Exception as exc:
+                    log(
+                        f"Error executing tool handler '{tool_name}': {exc}",
+                        level=logging.ERROR,
+                    )
+                    raise RuntimeError(f"Error in tool handler '{tool_name}': {exc}")
+
+                if self._output_structure:
+                    output_dict = self._output_structure.from_raw_input(tool_result)
+                    output_dict.console_print()
+                    parsed_result = output_dict
+                else:
+                    print(tool_result)
+                    parsed_result = cast(T, tool_result)
+
+            if isinstance(response_output, ResponseOutputMessage):
+                self.messages.add_assistant_message(response_output, kwargs)
+                self.save()
+                if hasattr(response, "output_text") and response.output_text:
+                    raw_text = response.output_text
+                    log("No tool call. Parsing output_text.")
+                    try:
+                        output_dict = json.loads(raw_text)
+                        if self._output_structure and self._schema:
+                            return self._output_structure.from_raw_input(output_dict)
+                        return output_dict
+                    except Exception:
+                        print(raw_text)
+        if parsed_result is not None:
+            return parsed_result
+        return None
+
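Not part of the diff: handlers registered in tool_handlers receive the raw ResponseFunctionToolCall and may return either a JSON string or a JSON-serializable object; run_async normalizes both before appending the tool message. A hedged handler sketch (the weather payload is invented):

    import json

    from openai.types.responses.response_function_tool_call import (
        ResponseFunctionToolCall,
    )

    def get_weather(tool_call: ResponseFunctionToolCall) -> str:
        # tool_call.arguments arrives as a JSON string from the Responses API.
        args = json.loads(tool_call.arguments)
        # Returning a JSON string: run_async json.loads() it for parsing and
        # forwards the string verbatim as the tool output.
        return json.dumps({"city": args.get("city"), "temp_c": 21})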
+    def run_sync(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[Union[str, List[str]]] = None,
+    ) -> Optional[T]:
+        """Run :meth:`run_async` synchronously."""
+
+        async def runner() -> Optional[T]:
+            return await self.run_async(content=content, attachments=attachments)
+
+        try:
+            asyncio.get_running_loop()
+        except RuntimeError:
+            return asyncio.run(runner())
+        result: Optional[T] = None
+
+        def _thread_func() -> None:
+            nonlocal result
+            result = asyncio.run(runner())
+
+        thread = threading.Thread(target=_thread_func)
+        thread.start()
+        thread.join()
+        return result
+
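Not part of the diff: the thread fallback in run_sync exists because asyncio.run raises RuntimeError when a loop is already running (for example, inside a Jupyter cell). The same pattern in isolation, as a self-contained sketch:

    import asyncio
    import threading

    async def work() -> str:
        return "done"

    def run_sync_pattern() -> str:
        # Use asyncio.run() when no loop is running; otherwise run the
        # coroutine to completion on a fresh thread with its own loop.
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return asyncio.run(work())

        result = ""

        def _thread() -> None:
            nonlocal result
            result = asyncio.run(work())

        t = threading.Thread(target=_thread)
        t.start()
        t.join()
        return result

    print(run_sync_pattern())  # -> done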
+    def run_streamed(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[Union[str, List[str]]] = None,
+    ) -> Optional[T]:
+        """Generate a response asynchronously and return the awaited result.
+
+        Streaming is not yet supported for responses, so this helper simply
+        awaits :meth:`run_async` to mirror the agent API.
+
+        Parameters
+        ----------
+        content
+            Prompt text or list of texts.
+        attachments
+            Optional file path or list of paths to upload and attach.
+
+        Returns
+        -------
+        Optional[T]
+            Parsed response object or ``None``.
+        """
+        return asyncio.run(self.run_async(content=content, attachments=attachments))
+
+    def save(self, filepath: Optional[str | Path] = None) -> None:
+        """Serialize the message history to a JSON file."""
+        if filepath is not None:
+            target = Path(filepath)
+        elif self._save_path is not None:
+            if self._save_path.suffix == ".json":
+                target = self._save_path
+            else:
+                filename = f"{str(self.uuid).lower()}.json"
+                target = self._save_path / filename
+        elif self._data_path_fn is not None and self._module_name is not None:
+            filename = f"{str(self.uuid).lower()}.json"
+            target = self.data_path / filename
+        else:
+            log(
+                "Skipping save: no filepath, save_path, or data_path_fn configured.",
+                level=logging.DEBUG,
+            )
+            return
+
+        self.messages.to_json_file(str(target))
+        log(f"Saved messages to {target}")
+
+    def __repr__(self) -> str:
+        """Return an unambiguous representation including model and UUID."""
+        data_path = None
+        if self._data_path_fn is not None and self._module_name is not None:
+            data_path = self.data_path
+        return (
+            f"<{self.__class__.__name__}(model={self._model}, uuid={self.uuid}, "
+            f"messages={len(self.messages.messages)}, data_path={data_path})>"
+        )
+
+    def __enter__(self) -> "ResponseBase[T]":
+        """Enter the context manager for this response session."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+        """Exit the context manager and close remote resources."""
+        self.close()
+
+    def close(self) -> None:
+        """Delete managed vector stores and clean up the session."""
+        log(f"Closing session {self.uuid} for {self.__class__.__name__}")
+
+        try:
+            if self._user_vector_storage and self._cleanup_user_vector_storage:
+                self._user_vector_storage.delete()
+                log("User vector store deleted.")
+        except Exception as exc:
+            log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
+        try:
+            if self._system_vector_storage and self._cleanup_system_vector_storage:
+                self._system_vector_storage.delete()
+                log("System vector store deleted.")
+        except Exception as exc:
+            log(f"Error deleting system vector store: {exc}", level=logging.WARNING)
+        log(f"Session {self.uuid} closed.")