openai-sdk-helpers 0.0.2__py3-none-any.whl
- openai_sdk_helpers/__init__.py +34 -0
- openai_sdk_helpers/agent/__init__.py +23 -0
- openai_sdk_helpers/agent/base.py +432 -0
- openai_sdk_helpers/agent/config.py +66 -0
- openai_sdk_helpers/agent/project_manager.py +416 -0
- openai_sdk_helpers/agent/runner.py +117 -0
- openai_sdk_helpers/agent/utils.py +47 -0
- openai_sdk_helpers/agent/vector_search.py +418 -0
- openai_sdk_helpers/agent/web_search.py +404 -0
- openai_sdk_helpers/config.py +141 -0
- openai_sdk_helpers/enums/__init__.py +7 -0
- openai_sdk_helpers/enums/base.py +17 -0
- openai_sdk_helpers/environment.py +27 -0
- openai_sdk_helpers/prompt/__init__.py +77 -0
- openai_sdk_helpers/response/__init__.py +16 -0
- openai_sdk_helpers/response/base.py +477 -0
- openai_sdk_helpers/response/messages.py +211 -0
- openai_sdk_helpers/response/runner.py +42 -0
- openai_sdk_helpers/response/tool_call.py +70 -0
- openai_sdk_helpers/structure/__init__.py +57 -0
- openai_sdk_helpers/structure/base.py +591 -0
- openai_sdk_helpers/structure/plan/__init__.py +13 -0
- openai_sdk_helpers/structure/plan/enum.py +48 -0
- openai_sdk_helpers/structure/plan/plan.py +104 -0
- openai_sdk_helpers/structure/plan/task.py +122 -0
- openai_sdk_helpers/structure/prompt.py +24 -0
- openai_sdk_helpers/structure/responses.py +148 -0
- openai_sdk_helpers/structure/summary.py +65 -0
- openai_sdk_helpers/structure/vector_search.py +82 -0
- openai_sdk_helpers/structure/web_search.py +46 -0
- openai_sdk_helpers/utils/__init__.py +13 -0
- openai_sdk_helpers/utils/core.py +208 -0
- openai_sdk_helpers/vector_storage/__init__.py +15 -0
- openai_sdk_helpers/vector_storage/cleanup.py +91 -0
- openai_sdk_helpers/vector_storage/storage.py +501 -0
- openai_sdk_helpers/vector_storage/types.py +58 -0
- openai_sdk_helpers-0.0.2.dist-info/METADATA +137 -0
- openai_sdk_helpers-0.0.2.dist-info/RECORD +40 -0
- openai_sdk_helpers-0.0.2.dist-info/WHEEL +4 -0
- openai_sdk_helpers-0.0.2.dist-info/licenses/LICENSE +21 -0
--- /dev/null
+++ openai_sdk_helpers/response/base.py
@@ -0,0 +1,477 @@
+"""Base response handling for OpenAI interactions."""
+
+from __future__ import annotations
+
+import asyncio
+import inspect
+import json
+import logging
+import threading
+import uuid
+from pathlib import Path
+from typing import (
+    Any,
+    Callable,
+    Generic,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from openai import OpenAI
+from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
+from openai.types.responses.response_input_file_param import ResponseInputFileParam
+from openai.types.responses.response_input_message_content_list_param import (
+    ResponseInputMessageContentListParam,
+)
+from openai.types.responses.response_input_param import ResponseInputItemParam
+from openai.types.responses.response_input_text_param import ResponseInputTextParam
+from openai.types.responses.response_output_message import ResponseOutputMessage
+
+from .messages import ResponseMessages
+from ..structure import BaseStructure
+from ..utils import ensure_list, log
+
+T = TypeVar("T", bound=BaseStructure)
+ToolHandler = Callable[[ResponseFunctionToolCall], Union[str, Any]]
+ProcessContent = Callable[[str], Tuple[str, List[str]]]
+
+
+class ResponseBase(Generic[T]):
+    """Manage OpenAI interactions for structured responses.
+
+    This base class handles input construction, OpenAI requests, tool calls,
+    and optional parsing into structured output models.
+
+    Methods
+    -------
+    generate_response_async(content, attachments)
+        Generate a response asynchronously and return parsed output.
+    generate_response(content, attachments)
+        Synchronous wrapper around ``generate_response_async``.
+    save(filepath)
+        Serialize the message history to disk.
+    close()
+        Clean up remote resources (vector stores).
+    """
+
+    def __init__(
+        self,
+        *,
+        instructions: str,
+        tools: Optional[list],
+        schema: Optional[Any],
+        output_structure: Optional[Type[T]],
+        tool_handlers: dict[str, ToolHandler],
+        process_content: Optional[ProcessContent] = None,
+        module_name: Optional[str] = None,
+        vector_storage_cls: Optional[type] = None,
+        client: Optional[OpenAI] = None,
+        model: Optional[str] = None,
+        api_key: Optional[str] = None,
+        attachments: Optional[Union[Tuple[str, str], list[Tuple[str, str]]]] = None,
+        data_path_fn: Optional[Callable[[str], Path]] = None,
+        save_path: Optional[Path | str] = None,
+    ) -> None:
+        """Initialize a response session.
+
+        Parameters
+        ----------
+        instructions : str
+            System instructions for the OpenAI response.
+        tools : list or None
+            Tool definitions for the OpenAI request.
+        schema : object or None
+            Optional response schema configuration.
+        output_structure : type[BaseStructure] or None
+            Structure type used to parse tool call outputs.
+        tool_handlers : dict[str, ToolHandler]
+            Mapping of tool names to handler callables.
+        process_content : callable, optional
+            Callback that cleans input text and extracts attachments.
+        module_name : str, optional
+            Module name used to build the data path.
+        vector_storage_cls : type, optional
+            Vector storage class used for file uploads.
+        client : OpenAI or None, default=None
+            Optional pre-initialized OpenAI client.
+        model : str or None, default=None
+            Optional OpenAI model name override.
+        api_key : str or None, default=None
+            Optional OpenAI API key override.
+        attachments : tuple or list of tuples, optional
+            File attachments in the form ``(file_path, tool_type)``.
+        data_path_fn : callable or None, default=None
+            Function that maps ``module_name`` to a base data path.
+        save_path : Path | str or None, default=None
+            Optional path to a directory or file for persisted messages.
+
+        Raises
+        ------
+        ValueError
+            If the API key or model is missing.
+        RuntimeError
+            If the OpenAI client fails to initialize.
+        """
+        self._tool_handlers = tool_handlers
+        self._process_content = process_content
+        self._module_name = module_name
+        self._vector_storage_cls = vector_storage_cls
+        self._data_path_fn = data_path_fn
+        self._save_path = Path(save_path) if save_path is not None else None
+        self._instructions = instructions
+        self._tools = tools if tools is not None else []
+        self._schema = schema
+        self._output_structure = output_structure
+
+        if client is None:
+            if api_key is None:
+                raise ValueError("OpenAI API key is required")
+            try:
+                self._client = OpenAI(api_key=api_key)
+            except Exception as exc:
+                raise RuntimeError("Failed to initialize OpenAI client") from exc
+        else:
+            self._client = client
+
+        self._model = model
+        if not self._model:
+            raise ValueError("OpenAI model is required")
+
+        self.uuid = uuid.uuid4()
+        self.name = self.__class__.__name__.lower()
+
+        system_content: ResponseInputMessageContentListParam = [
+            ResponseInputTextParam(type="input_text", text=instructions)
+        ]
+
+        self._system_vector_storage: Optional[Any] = None
+        self._user_vector_storage: Optional[Any] = None
+
+        if attachments:
+            if self._vector_storage_cls is None:
+                raise RuntimeError("vector_storage_cls is required for attachments.")
+            self.file_objects: dict[str, List[str]] = {}
+            storage_name = f"{self.__class__.__name__.lower()}_{self.name}_system"
+            self._system_vector_storage = self._vector_storage_cls(
+                store_name=storage_name, client=self._client, model=self._model
+            )
+            system_vector_storage = cast(Any, self._system_vector_storage)
+            # Normalize a single (file_path, tool_type) tuple to a list so the
+            # unpacking below also works for the bare-tuple form the signature
+            # allows.
+            attachment_items = (
+                [attachments] if isinstance(attachments, tuple) else attachments
+            )
+            for file_path, tool_type in attachment_items:
+                uploaded_file = system_vector_storage.upload_file(file_path=file_path)
+                self.file_objects.setdefault(tool_type, []).append(uploaded_file.id)
+
+            self.tool_resources = {}
+            required_tools = []
+
+            for tool_type, file_ids in self.file_objects.items():
+                required_tools.append({"type": tool_type})
+                self.tool_resources[tool_type] = {"file_ids": file_ids}
+                if tool_type == "file_search":
+                    self.tool_resources[tool_type]["vector_store_ids"] = [
+                        system_vector_storage.id
+                    ]
+
+            existing_tool_types = {tool["type"] for tool in self._tools}
+            for tool in required_tools:
+                tool_type = tool["type"]
+                if tool_type == "file_search":
+                    tool["vector_store_ids"] = [system_vector_storage.id]
+                if tool_type not in existing_tool_types:
+                    self._tools.append(tool)
+
+        self.messages = ResponseMessages()
+        self.messages.add_system_message(content=system_content)
+        if self._save_path is not None or (
+            self._data_path_fn is not None and self._module_name is not None
+        ):
+            self.save()
+
+    @property
+    def data_path(self) -> Path:
+        """Return the directory used to persist artifacts for this session.
+
+        Returns
+        -------
+        Path
+            Absolute path for persisting response artifacts.
+
+        Raises
+        ------
+        RuntimeError
+            If ``data_path_fn`` or ``module_name`` is not configured.
+        """
+        if self._data_path_fn is None or self._module_name is None:
+            raise RuntimeError(
+                "data_path_fn and module_name are required to build data paths."
+            )
+        base_path = self._data_path_fn(self._module_name)
+        return base_path / self.__class__.__name__.lower() / self.name
+
+    def _build_input(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[List[str]] = None,
+    ) -> None:
+        """Build the list of input messages for the OpenAI request.
+
+        Parameters
+        ----------
+        content
+            String or list of strings to include as user messages.
+        attachments
+            Optional list of file paths to upload and attach.
+        """
+        contents = ensure_list(content)
+
+        for raw_content in contents:
+            if self._process_content is None:
+                processed_text, content_attachments = raw_content, []
+            else:
+                processed_text, content_attachments = self._process_content(raw_content)
+            input_content: List[
+                Union[ResponseInputTextParam, ResponseInputFileParam]
+            ] = [ResponseInputTextParam(type="input_text", text=processed_text)]
+
+            all_attachments = (attachments or []) + content_attachments
+
+            for file_path in all_attachments:
+                if self._user_vector_storage is None:
+                    if self._vector_storage_cls is None:
+                        raise RuntimeError(
+                            "vector_storage_cls is required for attachments."
+                        )
+                    store_name = f"{self.__class__.__name__.lower()}_{self.name}_{self.uuid}_user"
+                    self._user_vector_storage = self._vector_storage_cls(
+                        store_name=store_name,
+                        client=self._client,
+                        model=self._model,
+                    )
+                    user_vector_storage = cast(Any, self._user_vector_storage)
+                    if not any(
+                        tool.get("type") == "file_search" for tool in self._tools
+                    ):
+                        self._tools.append(
+                            {
+                                "type": "file_search",
+                                "vector_store_ids": [user_vector_storage.id],
+                            }
+                        )
+                    else:
+                        # Point any existing file_search tool at both the
+                        # system and the per-session user vector stores.
+                        for tool in self._tools:
+                            if tool.get("type") == "file_search":
+                                if self._system_vector_storage is not None:
+                                    tool["vector_store_ids"] = [
+                                        cast(Any, self._system_vector_storage).id,
+                                        user_vector_storage.id,
+                                    ]
+                user_vector_storage = cast(Any, self._user_vector_storage)
+                uploaded_file = user_vector_storage.upload_file(file_path)
+                input_content.append(
+                    ResponseInputFileParam(type="input_file", file_id=uploaded_file.id)
+                )
+
+            message = cast(
+                ResponseInputItemParam,
+                {"role": "user", "content": input_content},
+            )
+            self.messages.add_user_message(message)
+
+    async def generate_response_async(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[Union[str, List[str]]] = None,
+    ) -> Optional[T]:
+        """Generate a response asynchronously.
+
+        Parameters
+        ----------
+        content
+            Prompt text or list of texts.
+        attachments
+            Optional file path or list of paths to upload and attach.
+
+        Returns
+        -------
+        Optional[T]
+            Parsed response object or ``None``.
+
+        Raises
+        ------
+        RuntimeError
+            If the API returns no output or a tool handler errors.
+        ValueError
+            If no handler is found for a tool invoked by the API.
+        """
+        log(f"{self.__class__.__name__}::generate_response")
+        parsed_result: Optional[T] = None
+
+        self._build_input(
+            content=content,
+            attachments=(ensure_list(attachments) if attachments else None),
+        )
+
+        kwargs = {
+            "input": self.messages.to_openai_payload(),
+            "model": self._model,
+        }
+        if self._schema is not None:
+            kwargs["text"] = self._schema
+
+        if self._tools:
+            kwargs["tools"] = self._tools
+            kwargs["tool_choice"] = "auto"
+        response = self._client.responses.create(**kwargs)
+
+        if not response.output:
+            log("No output returned from OpenAI.", level=logging.ERROR)
+            raise RuntimeError("No output returned from OpenAI.")
+
+        for response_output in response.output:
+            if isinstance(response_output, ResponseFunctionToolCall):
+                log(
+                    f"Tool call detected. Executing {response_output.name}.",
+                    level=logging.INFO,
+                )
+
+                tool_name = response_output.name
+                handler = self._tool_handlers.get(tool_name)
+
+                if handler is None:
+                    log(
+                        f"No handler found for tool '{tool_name}'",
+                        level=logging.ERROR,
+                    )
+                    raise ValueError(f"No handler for tool: {tool_name}")
+
+                try:
+                    if inspect.iscoroutinefunction(handler):
+                        tool_result_json = await handler(response_output)
+                    else:
+                        tool_result_json = handler(response_output)
+                    # Normalize the handler result to both a Python object
+                    # and a JSON string for the message history.
+                    if isinstance(tool_result_json, str):
+                        tool_result = json.loads(tool_result_json)
+                        tool_output = tool_result_json
+                    else:
+                        tool_result = tool_result_json
+                        tool_output = json.dumps(tool_result)
+                    self.messages.add_tool_message(
+                        content=response_output, output=tool_output
+                    )
+                    self.save()
+                except Exception as exc:
+                    log(
+                        f"Error executing tool handler '{tool_name}': {exc}",
+                        level=logging.ERROR,
+                    )
+                    raise RuntimeError(
+                        f"Error in tool handler '{tool_name}': {exc}"
+                    ) from exc
+
+                if self._output_structure:
+                    output_dict = self._output_structure.from_raw_input(tool_result)
+                    output_dict.console_print()
+                    parsed_result = output_dict
+                else:
+                    print(tool_result)
+                    parsed_result = cast(T, tool_result)
+
+            if isinstance(response_output, ResponseOutputMessage):
+                self.messages.add_assistant_message(response_output, kwargs)
+                self.save()
+        if hasattr(response, "output_text") and response.output_text:
+            raw_text = response.output_text
+            log("No tool call. Parsing output_text.")
+            try:
+                output_dict = json.loads(raw_text)
+                if self._output_structure and self._schema:
+                    return self._output_structure.from_raw_input(output_dict)
+                return output_dict
+            except Exception:
+                print(raw_text)
+        if parsed_result is not None:
+            return parsed_result
+        return None
+
+    def generate_response(
+        self,
+        content: Union[str, List[str]],
+        attachments: Optional[Union[str, List[str]]] = None,
+    ) -> Optional[T]:
+        """Run :meth:`generate_response_async` synchronously."""
+
+        async def runner() -> Optional[T]:
+            return await self.generate_response_async(
+                content=content, attachments=attachments
+            )
+
+        try:
+            asyncio.get_running_loop()
+        except RuntimeError:
+            # No event loop is running: drive the coroutine directly.
+            return asyncio.run(runner())
+        # Already inside a running loop: asyncio.run() would raise, so run
+        # the coroutine on a fresh loop in a worker thread instead.
+        result: Optional[T] = None
+
+        def _thread_func() -> None:
+            nonlocal result
+            result = asyncio.run(runner())
+
+        thread = threading.Thread(target=_thread_func)
+        thread.start()
+        thread.join()
+        return result
+
+    def save(self, filepath: Optional[str | Path] = None) -> None:
+        """Serialize the message history to a JSON file."""
+        if filepath is not None:
+            target = Path(filepath)
+        elif self._save_path is not None:
+            if self._save_path.suffix == ".json":
+                target = self._save_path
+            else:
+                filename = f"{str(self.uuid).lower()}.json"
+                target = self._save_path / filename
+        elif self._data_path_fn is not None and self._module_name is not None:
+            filename = f"{str(self.uuid).lower()}.json"
+            target = self.data_path / filename
+        else:
+            log(
+                "Skipping save: no filepath, save_path, or data_path_fn configured.",
+                level=logging.DEBUG,
+            )
+            return
+
+        self.messages.to_json_file(str(target))
+        log(f"Saved messages to {target}")
+
+    def __repr__(self) -> str:
+        """Return an unambiguous representation including model and UUID."""
+        data_path = None
+        if self._data_path_fn is not None and self._module_name is not None:
+            data_path = self.data_path
+        return (
+            f"<{self.__class__.__name__}(model={self._model}, uuid={self.uuid}, "
+            f"messages={len(self.messages.messages)}, data_path={data_path})>"
+        )
+
+    def __enter__(self) -> "ResponseBase[T]":
+        """Enter the context manager for this response session."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+        """Exit the context manager and close remote resources."""
+        self.close()
+
+    def close(self) -> None:
+        """Delete remote vector stores and clean up the session."""
+        log(f"Closing session {self.uuid} for {self.__class__.__name__}")
+
+        try:
+            if self._user_vector_storage:
+                self._user_vector_storage.delete()
+                log("User vector store deleted.")
+        except Exception as exc:
+            log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
+        try:
+            if self._system_vector_storage:
+                self._system_vector_storage.delete()
+                log("System vector store deleted.")
+        except Exception as exc:
+            log(f"Error deleting system vector store: {exc}", level=logging.WARNING)
+        log(f"Session {self.uuid} closed.")
--- /dev/null
+++ openai_sdk_helpers/response/messages.py
@@ -0,0 +1,211 @@
+"""Message containers for shared OpenAI responses."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Dict, List, Union, cast
+
+from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
+from openai.types.responses.response_function_tool_call_param import (
+    ResponseFunctionToolCallParam,
+)
+from openai.types.responses.response_input_message_content_list_param import (
+    ResponseInputMessageContentListParam,
+)
+from openai.types.responses.response_input_param import (
+    FunctionCallOutput,
+    ResponseInputItemParam,
+)
+from openai.types.responses.response_output_message import ResponseOutputMessage
+
+from ..utils import JSONSerializable
+from .tool_call import ResponseToolCall
+
+
+@dataclass
+class ResponseMessage(JSONSerializable):
+    """Single message exchanged with the OpenAI client.
+
+    Methods
+    -------
+    to_openai_format()
+        Return the payload in the format expected by the OpenAI client.
+    """
+
+    role: str  # "user", "assistant", "system", or "tool"
+    content: (
+        ResponseInputItemParam
+        | ResponseOutputMessage
+        | ResponseFunctionToolCallParam
+        | FunctionCallOutput
+        | ResponseInputMessageContentListParam
+    )
+    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+    metadata: Dict[str, Union[str, float, bool]] = field(default_factory=dict)
+
+    def to_openai_format(
+        self,
+    ) -> (
+        ResponseInputItemParam
+        | ResponseOutputMessage
+        | ResponseFunctionToolCallParam
+        | FunctionCallOutput
+        | ResponseInputMessageContentListParam
+    ):
+        """Return the message in the format expected by the OpenAI client.
+
+        Returns
+        -------
+        ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
+            Stored message content in OpenAI format.
+        """
+        return self.content
+
+
+@dataclass
+class ResponseMessages(JSONSerializable):
+    """Represent a collection of messages in a response.
+
+    This dataclass encapsulates user inputs and assistant outputs during an
+    OpenAI API interaction.
+
+    Methods
+    -------
+    add_system_message(content, **metadata)
+        Append a system message to the conversation.
+    add_user_message(input_content, **metadata)
+        Append a user message to the conversation.
+    add_assistant_message(content, metadata)
+        Append an assistant message to the conversation.
+    add_tool_message(content, output, **metadata)
+        Record a tool call and its output.
+    to_openai_payload()
+        Convert stored messages to the OpenAI input payload.
+    """
+
+    messages: List[ResponseMessage] = field(default_factory=list)
+
+    def add_system_message(
+        self, content: ResponseInputMessageContentListParam, **metadata
+    ) -> None:
+        """Append a system message to the conversation.
+
+        Parameters
+        ----------
+        content : ResponseInputMessageContentListParam
+            System message content in OpenAI format.
+        **metadata
+            Optional metadata to store with the message.
+
+        Returns
+        -------
+        None
+        """
+        response_input = cast(
+            ResponseInputItemParam, {"role": "system", "content": content}
+        )
+        self.messages.append(
+            ResponseMessage(role="system", content=response_input, metadata=metadata)
+        )
+
+    def add_user_message(
+        self, input_content: ResponseInputItemParam, **metadata
+    ) -> None:
+        """Append a user message to the conversation.
+
+        Parameters
+        ----------
+        input_content : ResponseInputItemParam
+            Message payload supplied by the user.
+        **metadata
+            Optional metadata to store with the message.
+
+        Returns
+        -------
+        None
+        """
+        self.messages.append(
+            ResponseMessage(role="user", content=input_content, metadata=metadata)
+        )
+
+    def add_assistant_message(
+        self,
+        content: ResponseOutputMessage,
+        metadata: Dict[str, Union[str, float, bool]],
+    ) -> None:
+        """Append an assistant message to the conversation.
+
+        Parameters
+        ----------
+        content : ResponseOutputMessage
+            Assistant response message.
+        metadata : dict[str, Union[str, float, bool]]
+            Metadata to store with the message.
+
+        Returns
+        -------
+        None
+        """
+        self.messages.append(
+            ResponseMessage(role="assistant", content=content, metadata=metadata)
+        )
+
+    def add_tool_message(
+        self, content: ResponseFunctionToolCall, output: str, **metadata
+    ) -> None:
+        """Record a tool call and its output in the conversation history.
+
+        Parameters
+        ----------
+        content : ResponseFunctionToolCall
+            Tool call received from OpenAI.
+        output : str
+            JSON string returned by the executed tool.
+        **metadata
+            Optional metadata to store with the message.
+
+        Returns
+        -------
+        None
+        """
+        tool_call = ResponseToolCall(
+            call_id=content.call_id,
+            name=content.name,
+            arguments=content.arguments,
+            output=output,
+        )
+        function_call, function_call_output = tool_call.to_response_input_item_param()
+        self.messages.append(
+            ResponseMessage(role="tool", content=function_call, metadata=metadata)
+        )
+        self.messages.append(
+            ResponseMessage(
+                role="tool", content=function_call_output, metadata=metadata
+            )
+        )
+
+    def to_openai_payload(
+        self,
+    ) -> List[
+        ResponseInputItemParam
+        | ResponseOutputMessage
+        | ResponseFunctionToolCallParam
+        | FunctionCallOutput
+        | ResponseInputMessageContentListParam
+    ]:
+        """Convert stored messages to the input payload expected by OpenAI.
+
+        Notes
+        -----
+        Assistant messages are model outputs and are not included in the
+        next request's input payload.
+
+        Returns
+        -------
+        list
+            List of message payloads excluding assistant outputs.
+        """
+        return [
+            msg.to_openai_format() for msg in self.messages if msg.role != "assistant"
+        ]
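
A short sketch of the message flow these containers implement, assuming only the API shown above; the message text is illustrative.

    from openai_sdk_helpers.response.messages import ResponseMessages

    messages = ResponseMessages()
    messages.add_system_message(
        content=[{"type": "input_text", "text": "You are a helpful assistant."}]
    )
    messages.add_user_message(
        {"role": "user", "content": [{"type": "input_text", "text": "Hello"}]}
    )

    # Assistant outputs are kept for history and serialization, but
    # to_openai_payload() filters them out of the next request's input.
    payload = messages.to_openai_payload()
    assert all(m.get("role") != "assistant" for m in payload)

Tool calls are recorded as two entries (the function_call item and its function_call_output), both with role "tool", so both round-trip through to_openai_payload() on subsequent requests.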