openai-sdk-helpers 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. openai_sdk_helpers/__init__.py +44 -7
  2. openai_sdk_helpers/agent/base.py +5 -1
  3. openai_sdk_helpers/agent/coordination.py +4 -5
  4. openai_sdk_helpers/agent/runner.py +4 -1
  5. openai_sdk_helpers/agent/search/base.py +1 -0
  6. openai_sdk_helpers/agent/search/vector.py +2 -0
  7. openai_sdk_helpers/cli.py +265 -0
  8. openai_sdk_helpers/config.py +93 -2
  9. openai_sdk_helpers/context_manager.py +1 -1
  10. openai_sdk_helpers/deprecation.py +167 -0
  11. openai_sdk_helpers/environment.py +3 -2
  12. openai_sdk_helpers/errors.py +0 -12
  13. openai_sdk_helpers/files_api.py +373 -0
  14. openai_sdk_helpers/logging_config.py +24 -95
  15. openai_sdk_helpers/prompt/base.py +1 -1
  16. openai_sdk_helpers/response/__init__.py +7 -3
  17. openai_sdk_helpers/response/base.py +217 -147
  18. openai_sdk_helpers/response/config.py +16 -1
  19. openai_sdk_helpers/response/files.py +392 -0
  20. openai_sdk_helpers/response/messages.py +1 -0
  21. openai_sdk_helpers/retry.py +1 -1
  22. openai_sdk_helpers/streamlit_app/app.py +97 -7
  23. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +15 -8
  24. openai_sdk_helpers/structure/base.py +6 -6
  25. openai_sdk_helpers/structure/plan/helpers.py +1 -0
  26. openai_sdk_helpers/structure/plan/task.py +7 -7
  27. openai_sdk_helpers/tools.py +116 -13
  28. openai_sdk_helpers/utils/__init__.py +100 -35
  29. openai_sdk_helpers/{async_utils.py → utils/async_utils.py} +5 -6
  30. openai_sdk_helpers/utils/coercion.py +138 -0
  31. openai_sdk_helpers/utils/deprecation.py +167 -0
  32. openai_sdk_helpers/utils/encoding.py +189 -0
  33. openai_sdk_helpers/utils/json_utils.py +98 -0
  34. openai_sdk_helpers/utils/output_validation.py +448 -0
  35. openai_sdk_helpers/utils/path_utils.py +46 -0
  36. openai_sdk_helpers/{validation.py → utils/validation.py} +7 -3
  37. openai_sdk_helpers/vector_storage/storage.py +59 -28
  38. {openai_sdk_helpers-0.1.0.dist-info → openai_sdk_helpers-0.1.2.dist-info}/METADATA +152 -3
  39. openai_sdk_helpers-0.1.2.dist-info/RECORD +79 -0
  40. openai_sdk_helpers-0.1.2.dist-info/entry_points.txt +2 -0
  41. openai_sdk_helpers/utils/core.py +0 -596
  42. openai_sdk_helpers-0.1.0.dist-info/RECORD +0 -69
  43. {openai_sdk_helpers-0.1.0.dist-info → openai_sdk_helpers-0.1.2.dist-info}/WHEEL +0 -0
  44. {openai_sdk_helpers-0.1.0.dist-info → openai_sdk_helpers-0.1.2.dist-info}/licenses/LICENSE +0 -0
@@ -25,8 +25,16 @@ from typing import (
     cast,
 )
 
-from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
+from openai.types.responses.response_function_tool_call import (
+    ResponseFunctionToolCall,
+)
+from openai.types.responses.response_input_file_content_param import (
+    ResponseInputFileContentParam,
+)
 from openai.types.responses.response_input_file_param import ResponseInputFileParam
+from openai.types.responses.response_input_image_content_param import (
+    ResponseInputImageContentParam,
+)
 from openai.types.responses.response_input_message_content_list_param import (
     ResponseInputMessageContentListParam,
 )
@@ -38,16 +46,19 @@ from .messages import ResponseMessage, ResponseMessages
 from ..config import OpenAISettings
 from ..structure import BaseStructure
 from ..types import OpenAIClient
-from ..utils import ensure_list, log
+from ..utils import (
+    check_filepath,
+    coerce_jsonable,
+    customJSONEncoder,
+    ensure_list,
+    log,
+)
 
 if TYPE_CHECKING:  # pragma: no cover - only for typing hints
     from openai_sdk_helpers.streamlit_app.config import StreamlitAppConfig
 
 T = TypeVar("T", bound=BaseStructure)
 ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]
-ProcessContent = Callable[[str], tuple[str, list[str]]]
-
-
 RB = TypeVar("RB", bound="BaseResponse[BaseStructure]")
 
 
@@ -111,16 +122,14 @@ class BaseResponse(Generic[T]):
     def __init__(
         self,
         *,
+        name: str,
         instructions: str,
         tools: list | None,
         output_structure: type[T] | None,
         tool_handlers: dict[str, ToolHandler],
         openai_settings: OpenAISettings,
-        process_content: ProcessContent | None = None,
-        name: str | None = None,
         system_vector_store: list[str] | None = None,
-        data_path_fn: Callable[[str], Path] | None = None,
-        save_path: Path | str | None = None,
+        data_path: Path | str | None = None,
     ) -> None:
         """Initialize a response session with OpenAI configuration.
 
@@ -130,6 +139,9 @@ class BaseResponse(Generic[T]):
 
         Parameters
         ----------
+        name : str
+            Name for this response session, used for organizing artifacts
+            and naming vector stores.
         instructions : str
             System instructions provided to the OpenAI API for context.
         tools : list or None
@@ -144,18 +156,12 @@ class BaseResponse(Generic[T]):
             result.
         openai_settings : OpenAISettings
             Fully configured OpenAI settings with API key and default model.
-        process_content : callable or None, default None
-            Optional callback that processes input text and extracts file
-            attachments. Must return a tuple of (processed_text, attachment_list).
-        name : str or None, default None
-            Module name used for data path construction when data_path_fn is set.
         system_vector_store : list[str] or None, default None
             Optional list of vector store names to attach as system context.
-        data_path_fn : callable or None, default None
-            Function mapping name to a base directory path for artifact storage.
-        save_path : Path, str, or None, default None
-            Optional path to a directory or file where message history is saved.
-            If a directory, files are named using the session UUID.
+        data_path : Path, str, or None, default None
+            Optional absolute directory path for storing artifacts. If not provided,
+            defaults to get_data_path(class_name). Session files are saved as
+            data_path / uuid.json.
 
         Raises
         ------
@@ -170,18 +176,30 @@ class BaseResponse(Generic[T]):
         >>> from openai_sdk_helpers import BaseResponse, OpenAISettings
         >>> settings = OpenAISettings(api_key="sk-...", default_model="gpt-4")
         >>> response = BaseResponse(
+        ...     name="my_session",
         ...     instructions="You are helpful",
         ...     tools=None,
         ...     output_structure=None,
         ...     tool_handlers={},
-        ...     openai_settings=settings
+        ...     openai_settings=settings,
         ... )
         """
         self._tool_handlers = tool_handlers
-        self._process_content = process_content
         self._name = name
-        self._data_path_fn = data_path_fn
-        self._save_path = Path(save_path) if save_path is not None else None
+
+        # Resolve data_path with class name appended
+        class_name = self.__class__.__name__.lower()
+        if data_path is not None:
+            data_path_obj = Path(data_path)
+            if data_path_obj.name == class_name:
+                self._data_path = data_path_obj
+            else:
+                self._data_path = data_path_obj / class_name
+        else:
+            from ..environment import get_data_path
+
+            self._data_path = get_data_path(class_name)
+
         self._instructions = instructions
         self._tools = tools if tools is not None else []
         self._output_structure = output_structure
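Taken together, the hunks above replace the 0.1.0 trio of process_content, data_path_fn, and save_path with a required name and a single optional data_path. A minimal migration sketch for a 0.1.0 call site (paths and session names here are illustrative, not from the package):

    from pathlib import Path
    from openai_sdk_helpers import BaseResponse, OpenAISettings

    settings = OpenAISettings(api_key="sk-...", default_model="gpt-4")

    # 0.1.0 (removed): persistence was driven by save_path / data_path_fn
    # response = BaseResponse(
    #     instructions="You are helpful",
    #     tools=None,
    #     output_structure=None,
    #     tool_handlers={},
    #     openai_settings=settings,
    #     save_path="/data/sessions",
    # )

    # 0.1.2: name is required; artifacts resolve to data_path / <class name>,
    # or to get_data_path(<class name>) when data_path is omitted
    response = BaseResponse(
        name="my_session",
        instructions="You are helpful",
        tools=None,
        output_structure=None,
        tool_handlers={},
        openai_settings=settings,
        data_path=Path("/data/sessions"),
    )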
@@ -203,7 +221,6 @@ class BaseResponse(Generic[T]):
         )
 
         self.uuid = uuid.uuid4()
-        self.name = self.__class__.__name__.lower()
 
         system_content: ResponseInputMessageContentListParam = [
             ResponseInputTextParam(type="input_text", text=instructions)
@@ -211,6 +228,11 @@ class BaseResponse(Generic[T]):
 
         self._user_vector_storage: Any | None = None
 
+        # Initialize Files API manager for tracking uploaded files
+        from ..files_api import FilesAPIManager
+
+        self._files_manager = FilesAPIManager(self._client, auto_track=True)
+
         # New logic: system_vector_store is a list of vector store names to attach
         if system_vector_store:
             from .vector_store import attach_vector_store
@@ -227,106 +249,93 @@ class BaseResponse(Generic[T]):
 
         self.messages = ResponseMessages()
         self.messages.add_system_message(content=system_content)
-        if self._save_path is not None or (
-            self._data_path_fn is not None and self._name is not None
-        ):
+        if self._data_path is not None:
             self.save()
 
     @property
-    def data_path(self) -> Path:
-        """Return the directory for persisting session artifacts.
-
-        Constructs a path using data_path_fn, name, class name, and the
-        session name. Both data_path_fn and name must be set during
-        initialization for this property to work.
+    def name(self) -> str:
+        """Return the name of this response session.
 
         Returns
         -------
-        Path
-            Absolute path for persisting response artifacts and message history.
-
-        Raises
-        ------
-        RuntimeError
-            If data_path_fn or name were not provided during initialization.
-
-        Examples
-        --------
-        >>> response.data_path
-        PosixPath('/data/myapp/baseresponse/session_123')
+        str
+            Name used for organizing artifacts and naming vector stores.
         """
-        if self._data_path_fn is None or self._name is None:
-            raise RuntimeError(
-                "data_path_fn and name are required to build data paths."
-            )
-        base_path = self._data_path_fn(self._name)
-        return base_path / self.__class__.__name__.lower() / self.name
+        return self._name
 
     def _build_input(
         self,
         content: str | list[str],
-        attachments: list[str] | None = None,
+        files: list[str] | None = None,
+        use_vector_store: bool = False,
     ) -> None:
         """Construct input messages for the OpenAI API request.
 
-        Processes content through the optional process_content callback,
-        uploads any file attachments to vector stores, and adds all
-        messages to the conversation history.
+        Automatically detects file types and handles them appropriately:
+        - Images (jpg, png, gif, etc.) are sent as base64-encoded images
+        - Documents are sent as base64-encoded file data by default
+        - Documents can optionally be uploaded to vector stores for RAG
 
         Parameters
         ----------
         content : str or list[str]
             String or list of strings to include as user messages.
-        attachments : list[str] or None, default None
-            Optional list of file paths to upload and attach to the message.
+        files : list[str] or None, default None
+            Optional list of file paths. Each file is automatically processed
+            based on its type:
+            - Images are base64-encoded as input_image
+            - Documents are base64-encoded as input_file (default)
+            - Documents can be uploaded to vector stores if use_vector_store=True
+        use_vector_store : bool, default False
+            If True, non-image files are uploaded to a vector store for
+            RAG-enabled file search instead of inline base64 encoding.
 
         Notes
         -----
-        If attachments are provided and no user vector storage exists, this
-        method automatically creates one and adds a file_search tool to
-        the tools list.
+        When use_vector_store is True, this method automatically creates
+        a vector store and adds a file_search tool for document retrieval.
+        Images are always base64-encoded regardless of this setting.
+
+        Examples
+        --------
+        >>> # Automatic handling - images as base64, docs inline
+        >>> response._build_input("Analyze these", files=["photo.jpg", "doc.pdf"])
+
+        >>> # Use vector store for documents (RAG)
+        >>> response._build_input(
+        ...     "Search these documents",
+        ...     files=["doc1.pdf", "doc2.pdf"],
+        ...     use_vector_store=True
+        ... )
         """
+        from .files import process_files
+
         contents = ensure_list(content)
+        all_files = files or []
 
+        # Process files using the dedicated files module
+        vector_file_refs, base64_files, image_contents = process_files(
+            self, all_files, use_vector_store
+        )
+
+        # Add each content as a separate message with the same attachments
         for raw_content in contents:
-            if self._process_content is None:
-                processed_text, content_attachments = raw_content, []
-            else:
-                processed_text, content_attachments = self._process_content(raw_content)
-            input_content: list[ResponseInputTextParam | ResponseInputFileParam] = [
-                ResponseInputTextParam(type="input_text", text=processed_text)
-            ]
-
-            all_attachments = (attachments or []) + content_attachments
-
-            for file_path in all_attachments:
-                if self._user_vector_storage is None:
-                    from openai_sdk_helpers.vector_storage import VectorStorage
-
-                    store_name = f"{self.__class__.__name__.lower()}_{self.name}_{self.uuid}_user"
-                    self._user_vector_storage = VectorStorage(
-                        store_name=store_name,
-                        client=self._client,
-                        model=self._model,
-                    )
-                    user_vector_storage = cast(Any, self._user_vector_storage)
-                    if not any(
-                        tool.get("type") == "file_search" for tool in self._tools
-                    ):
-                        self._tools.append(
-                            {
-                                "type": "file_search",
-                                "vector_store_ids": [user_vector_storage.id],
-                            }
-                        )
-                    else:
-                        # If system vector store is attached, its ID will be in tool config
-                        pass
-                user_vector_storage = cast(Any, self._user_vector_storage)
-                uploaded_file = user_vector_storage.upload_file(file_path)
-                input_content.append(
-                    ResponseInputFileParam(type="input_file", file_id=uploaded_file.id)
-                )
+            processed_text = raw_content.strip()
+            input_content: list[
+                ResponseInputTextParam
+                | ResponseInputFileParam
+                | ResponseInputFileContentParam
+                | ResponseInputImageContentParam
+            ] = [ResponseInputTextParam(type="input_text", text=processed_text)]
+
+            # Add vector store file references
+            input_content.extend(vector_file_refs)
+
+            # Add base64 files
+            input_content.extend(base64_files)
+
+            # Add images
+            input_content.extend(image_contents)
 
             message = cast(
                 ResponseInputItemParam,
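The rewritten _build_input delegates all file handling to the new response/files.py module (+392 lines in this release). From the call site alone one can read off the helper's contract: process_files(response, paths, use_vector_store) returns three lists that are extended onto the message content in order. A hedged sketch of that contract; the per-list descriptions are inferred from the type annotation above, not from files.py itself:

    # Inferred usage; the actual implementation lives in
    # openai_sdk_helpers/response/files.py.
    vector_file_refs, base64_files, image_contents = process_files(
        response,                     # BaseResponse instance (client, tools, stores)
        ["photo.jpg", "report.pdf"],  # mixed file types, routed by type
        False,                        # use_vector_store: False keeps docs inline
    )
    # vector_file_refs -> ResponseInputFileParam items referencing uploaded file IDs
    # base64_files     -> ResponseInputFileContentParam items with inline file data
    # image_contents   -> ResponseInputImageContentParam items with base64 images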
@@ -337,7 +346,8 @@ class BaseResponse(Generic[T]):
     async def run_async(
         self,
         content: str | list[str],
-        attachments: str | list[str] | None = None,
+        files: str | list[str] | None = None,
+        use_vector_store: bool = False,
     ) -> T | None:
         """Generate a response asynchronously from the OpenAI API.
 
@@ -345,12 +355,21 @@ class BaseResponse(Generic[T]):
         tool calls with registered handlers, and optionally parses the
         result into the configured output_structure.
 
+        Automatically detects file types:
+        - Images are sent as base64-encoded images
+        - Documents are sent as base64-encoded files (default)
+        - Documents can optionally use vector stores for RAG
+
         Parameters
         ----------
         content : str or list[str]
             Prompt text or list of prompt texts to send.
-        attachments : str, list[str], or None, default None
-            Optional file path or list of file paths to upload and attach.
+        files : str, list[str], or None, default None
+            Optional file path or list of file paths. Each file is
+            automatically processed based on its type.
+        use_vector_store : bool, default False
+            If True, non-image files are uploaded to a vector store
+            for RAG-enabled search instead of inline base64 encoding.
 
         Returns
         -------
@@ -368,15 +387,26 @@ class BaseResponse(Generic[T]):
 
         Examples
         --------
-        >>> result = await response.run_async("Analyze this text")
-        >>> print(result)
+        >>> # Automatic type detection
+        >>> result = await response.run_async(
+        ...     "Analyze these files",
+        ...     files=["photo.jpg", "document.pdf"]
+        ... )
+
+        >>> # Use vector store for documents
+        >>> result = await response.run_async(
+        ...     "Search these documents",
+        ...     files=["doc1.pdf", "doc2.pdf"],
+        ...     use_vector_store=True
+        ... )
         """
         log(f"{self.__class__.__name__}::run_response")
         parsed_result: T | None = None
 
         self._build_input(
             content=content,
-            attachments=(ensure_list(attachments) if attachments else None),
+            files=(ensure_list(files) if files else None),
+            use_vector_store=use_vector_store,
         )
 
         kwargs = {
@@ -421,8 +451,8 @@ class BaseResponse(Generic[T]):
                     tool_result = json.loads(tool_result_json)
                     tool_output = tool_result_json
                 else:
-                    tool_result = tool_result_json
-                    tool_output = json.dumps(tool_result)
+                    tool_result = coerce_jsonable(tool_result_json)
+                    tool_output = json.dumps(tool_result, cls=customJSONEncoder)
                 self.messages.add_tool_message(
                     content=response_output, output=tool_output
                 )
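The two-line change above hardens a fragile path: in 0.1.0, a tool handler returning anything json.dumps cannot serialize natively (a Path, datetime, set, and so on) raised TypeError before the tool message was recorded. A small illustration of the failure mode and the 0.1.2 path, using the helpers imported at the top of the module and assuming coerce_jsonable maps such values to JSON-safe forms, as its name and the paired encoder suggest:

    import json
    from datetime import datetime, timezone
    from pathlib import Path

    from openai_sdk_helpers.utils import coerce_jsonable, customJSONEncoder

    result = {"output": Path("/tmp/out.json"), "at": datetime.now(timezone.utc)}

    # 0.1.0 behavior: bare json.dumps raises TypeError on non-native values
    try:
        json.dumps(result)
    except TypeError as exc:
        print(f"old path fails: {exc}")

    # 0.1.2 behavior (sketch): coerce to JSON-safe values first, then
    # encode with the package's custom encoder
    tool_output = json.dumps(coerce_jsonable(result), cls=customJSONEncoder)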
@@ -443,7 +473,7 @@ class BaseResponse(Generic[T]):
                     parsed_result = cast(T, tool_result)
 
             if isinstance(response_output, ResponseOutputMessage):
-                self.messages.add_assistant_message(response_output, kwargs)
+                self.messages.add_assistant_message(response_output, metadata=kwargs)
                 self.save()
                 if hasattr(response, "output_text") and response.output_text:
                     raw_text = response.output_text
@@ -462,7 +492,9 @@ class BaseResponse(Generic[T]):
     def run_sync(
         self,
         content: str | list[str],
-        attachments: str | list[str] | None = None,
+        *,
+        files: str | list[str] | None = None,
+        use_vector_store: bool = False,
     ) -> T | None:
         """Execute run_async synchronously with proper event loop handling.
 
@@ -470,12 +502,21 @@ class BaseResponse(Generic[T]):
         a separate thread if necessary. This enables safe usage in both
         synchronous and asynchronous contexts.
 
+        Automatically detects file types:
+        - Images are sent as base64-encoded images
+        - Documents are sent as base64-encoded files (default)
+        - Documents can optionally use vector stores for RAG
+
         Parameters
         ----------
         content : str or list[str]
             Prompt text or list of prompt texts to send.
-        attachments : str, list[str], or None, default None
-            Optional file path or list of file paths to upload and attach.
+        files : str, list[str], or None, default None
+            Optional file path or list of file paths. Each file is
+            automatically processed based on its type.
+        use_vector_store : bool, default False
+            If True, non-image files are uploaded to a vector store
+            for RAG-enabled search instead of inline base64 encoding.
 
         Returns
        -------
@@ -484,12 +525,26 @@ class BaseResponse(Generic[T]):
 
         Examples
         --------
-        >>> result = response.run_sync("Summarize this document")
-        >>> print(result)
+        >>> # Automatic type detection
+        >>> result = response.run_sync(
+        ...     "Analyze these files",
+        ...     files=["photo.jpg", "document.pdf"]
+        ... )
+
+        >>> # Use vector store for documents
+        >>> result = response.run_sync(
+        ...     "Search these documents",
+        ...     files=["doc1.pdf", "doc2.pdf"],
+        ...     use_vector_store=True
+        ... )
         """
 
         async def runner() -> T | None:
-            return await self.run_async(content=content, attachments=attachments)
+            return await self.run_async(
+                content=content,
+                files=files,
+                use_vector_store=use_vector_store,
+            )
 
         try:
             asyncio.get_running_loop()
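The runner coroutine now threads the new keyword arguments through to run_async. The try/except whose first lines are visible above implements the loop-aware dispatch the docstring describes: probe for a running event loop and, if one exists, run the coroutine on a fresh loop in a worker thread rather than calling asyncio.run inside a live loop. A generic, self-contained sketch of that pattern, not the library's exact code:

    import asyncio
    from concurrent.futures import ThreadPoolExecutor

    def run_coroutine_sync(coro):
        """Run a coroutine from sync code, inside or outside an event loop."""
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop in this thread: safe to start one here.
            return asyncio.run(coro)
        # A loop is already running (e.g. Jupyter, an async web handler):
        # execute the coroutine on its own loop in a worker thread instead.
        with ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(asyncio.run, coro).result()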
@@ -509,7 +564,9 @@ class BaseResponse(Generic[T]):
     def run_streamed(
         self,
         content: str | list[str],
-        attachments: str | list[str] | None = None,
+        *,
+        files: str | list[str] | None = None,
+        use_vector_store: bool = False,
     ) -> T | None:
         """Execute run_async and await the result.
 
@@ -517,12 +574,21 @@ class BaseResponse(Generic[T]):
         simply awaits run_async to provide API compatibility with agent
         interfaces.
 
+        Automatically detects file types:
+        - Images are sent as base64-encoded images
+        - Documents are sent as base64-encoded files (default)
+        - Documents can optionally use vector stores for RAG
+
         Parameters
         ----------
         content : str or list[str]
             Prompt text or list of prompt texts to send.
-        attachments : str, list[str], or None, default None
-            Optional file path or list of file paths to upload and attach.
+        files : str, list[str], or None, default None
+            Optional file path or list of file paths. Each file is
+            automatically processed based on its type.
+        use_vector_store : bool, default False
+            If True, non-image files are uploaded to a vector store
+            for RAG-enabled search instead of inline base64 encoding.
 
         Returns
         -------
@@ -534,7 +600,13 @@ class BaseResponse(Generic[T]):
         This method exists for API consistency but does not currently
         provide true streaming functionality.
         """
-        return asyncio.run(self.run_async(content=content, attachments=attachments))
+        return asyncio.run(
+            self.run_async(
+                content=content,
+                files=files,
+                use_vector_store=use_vector_store,
+            )
+        )
 
     def get_last_tool_message(self) -> ResponseMessage | None:
         """Return the most recent tool message from conversation history.
@@ -629,44 +701,32 @@ class BaseResponse(Generic[T]):
         """Serialize the message history to a JSON file.
 
         Saves the complete conversation history to disk. The target path
-        is determined by filepath parameter, save_path from initialization,
-        or data_path_fn if configured.
+        is determined by the filepath parameter, or data_path if configured.
 
         Parameters
         ----------
         filepath : str, Path, or None, default None
-            Optional explicit path for the JSON file. If None, uses save_path
-            or constructs path from data_path_fn and session UUID.
+            Optional explicit path for the JSON file. If None, constructs the
+            path from data_path and the session UUID.
 
         Notes
         -----
-        If no save location is configured (no filepath, save_path, or
-        data_path_fn), the save operation is silently skipped.
+        If no filepath is provided and no data_path was configured during
+        initialization, the save operation is silently skipped.
 
         Examples
         --------
         >>> response.save("/path/to/session.json")
-        >>> response.save()  # Uses configured save_path or data_path
+        >>> response.save()  # Uses data_path / uuid.json
         """
         if filepath is not None:
             target = Path(filepath)
-        elif self._save_path is not None:
-            if self._save_path.suffix == ".json":
-                target = self._save_path
-            else:
-                filename = f"{str(self.uuid).lower()}.json"
-                target = self._save_path / filename
-        elif self._data_path_fn is not None and self._name is not None:
-            filename = f"{str(self.uuid).lower()}.json"
-            target = self.data_path / filename
         else:
-            log(
-                "Skipping save: no filepath, save_path, or data_path_fn configured.",
-                level=logging.DEBUG,
-            )
-            return
+            filename = f"{str(self.uuid).lower()}.json"
+            target = self._data_path / self._name / filename
 
-        self.messages.to_json_file(str(target))
+        checked = check_filepath(filepath=target)
+        self.messages.to_json_file(str(checked))
         log(f"Saved messages to {target}")
 
     def __repr__(self) -> str:
@@ -677,12 +737,9 @@ class BaseResponse(Generic[T]):
         str
             String showing class name, model, UUID, message count, and data path.
         """
-        data_path = None
-        if self._data_path_fn is not None and self._name is not None:
-            data_path = self.data_path
         return (
             f"<{self.__class__.__name__}(model={self._model}, uuid={self.uuid}, "
-            f"messages={len(self.messages.messages)}, data_path={data_path}>"
+            f"messages={len(self.messages.messages)}, data_path={self._data_path}>"
        )
 
     def __enter__(self) -> BaseResponse[T]:
@@ -710,11 +767,11 @@ class BaseResponse(Generic[T]):
         self.close()
 
     def close(self) -> None:
-        """Clean up session resources including vector stores.
+        """Clean up session resources including vector stores and uploaded files.
 
-        Saves the current message history and deletes managed vector stores.
-        User vector stores are always cleaned up. System vector store cleanup
-        is handled via tool configuration.
+        Saves the current message history, deletes managed vector stores, and
+        cleans up all tracked Files API uploads. User vector stores are always
+        cleaned up. System vector store cleanup is handled via tool configuration.
 
         Notes
         -----
@@ -732,6 +789,19 @@ class BaseResponse(Generic[T]):
         """
         log(f"Closing session {self.uuid} for {self.__class__.__name__}")
         self.save()
+
+        # Clean up tracked Files API uploads
+        try:
+            if hasattr(self, "_files_manager") and self._files_manager:
+                cleanup_results = self._files_manager.cleanup()
+                if cleanup_results:
+                    successful = sum(cleanup_results.values())
+                    log(
+                        f"Files API cleanup: {successful}/{len(cleanup_results)} files deleted"
+                    )
+        except Exception as exc:
+            log(f"Error cleaning up Files API uploads: {exc}", level=logging.WARNING)
+
         # Always clean user vector storage if it exists
         try:
             if self._user_vector_storage:
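With close() now covering Files API uploads as well as vector stores, the context-manager form shown earlier (__enter__/__exit__) guarantees all three cleanup steps run even on error. The sum(cleanup_results.values()) / len(cleanup_results) logging above also implies FilesAPIManager.cleanup() returns a mapping of file ID to per-file deletion success. A usage sketch against the 0.1.2 API (session name and file are placeholders):

    settings = OpenAISettings(api_key="sk-...", default_model="gpt-4")

    with BaseResponse(
        name="report_review",
        instructions="Summarize the attached report.",
        tools=None,
        output_structure=None,
        tool_handlers={},
        openai_settings=settings,
    ) as session:
        summary = session.run_sync(
            "Summarize this document",
            files=["report.pdf"],
            use_vector_store=True,
        )
    # On exit: history saved, tracked uploads deleted, vector stores removed.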
@@ -291,6 +291,7 @@ class ResponseConfiguration(Generic[TIn, TOut]):
         self,
         openai_settings: OpenAISettings,
         tool_handlers: dict[str, ToolHandler] = {},
+        add_output_instructions: bool = True,
     ) -> BaseResponse[TOut]:
         """Generate a BaseResponse instance based on the configuration.
 
@@ -302,15 +303,29 @@ class ResponseConfiguration(Generic[TIn, TOut]):
         tool_handlers : dict[str, Callable], optional
             Mapping of tool names to handler callables. Defaults to an empty
             dictionary when not provided.
+        add_output_instructions : bool, default=True
+            Whether to append the structured output prompt to the instructions.
 
         Returns
         -------
         BaseResponse[TOut]
             An instance of BaseResponse configured with ``openai_settings``.
         """
+        output_instructions = ""
+        if self.output_structure is not None and add_output_instructions:
+            output_instructions = self.output_structure.get_prompt(
+                add_enum_values=False
+            )
+
+        instructions = (
+            f"{self.instructions_text}\n{output_instructions}"
+            if output_instructions
+            else self.instructions_text
+        )
+
         return BaseResponse[TOut](
             name=self.name,
-            instructions=self.instructions_text,
+            instructions=instructions,
             tools=self.tools,
             output_structure=self.output_structure,
             tool_handlers=tool_handlers,
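Net effect in response/config.py: when an output_structure is configured, create() now appends that structure's prompt (via get_prompt(add_enum_values=False)) to the instructions unless the caller opts out. A hedged sketch of both call styles; the ResponseConfiguration constructor arguments are assumed from the attributes create() reads, and PlanStructure is a hypothetical BaseStructure subclass:

    # Constructor arguments assumed from the fields referenced in create()
    config = ResponseConfiguration(
        name="extract_plan",
        instructions_text="Extract a project plan from the input.",
        tools=None,
        output_structure=PlanStructure,  # hypothetical BaseStructure subclass
    )

    # Default: instructions gain PlanStructure.get_prompt(add_enum_values=False)
    response = config.create(openai_settings=settings)

    # Opt out when structured-output guidance is supplied some other way
    bare = config.create(openai_settings=settings, add_output_instructions=False)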