openai-sdk-helpers 0.4.3__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. openai_sdk_helpers/__init__.py +41 -7
  2. openai_sdk_helpers/agent/__init__.py +1 -2
  3. openai_sdk_helpers/agent/base.py +169 -190
  4. openai_sdk_helpers/agent/configuration.py +12 -20
  5. openai_sdk_helpers/agent/coordinator.py +14 -17
  6. openai_sdk_helpers/agent/runner.py +3 -45
  7. openai_sdk_helpers/agent/search/base.py +49 -71
  8. openai_sdk_helpers/agent/search/vector.py +82 -110
  9. openai_sdk_helpers/agent/search/web.py +103 -81
  10. openai_sdk_helpers/agent/summarizer.py +20 -28
  11. openai_sdk_helpers/agent/translator.py +17 -23
  12. openai_sdk_helpers/agent/validator.py +17 -23
  13. openai_sdk_helpers/errors.py +9 -0
  14. openai_sdk_helpers/extract/__init__.py +23 -0
  15. openai_sdk_helpers/extract/extractor.py +157 -0
  16. openai_sdk_helpers/extract/generator.py +476 -0
  17. openai_sdk_helpers/files_api.py +1 -0
  18. openai_sdk_helpers/logging.py +12 -1
  19. openai_sdk_helpers/prompt/extractor_config_agent_instructions.jinja +6 -0
  20. openai_sdk_helpers/prompt/extractor_config_generator.jinja +37 -0
  21. openai_sdk_helpers/prompt/extractor_config_generator_instructions.jinja +9 -0
  22. openai_sdk_helpers/prompt/extractor_prompt_optimizer_agent_instructions.jinja +4 -0
  23. openai_sdk_helpers/prompt/extractor_prompt_optimizer_request.jinja +11 -0
  24. openai_sdk_helpers/response/__init__.py +2 -6
  25. openai_sdk_helpers/response/base.py +233 -164
  26. openai_sdk_helpers/response/configuration.py +39 -14
  27. openai_sdk_helpers/response/files.py +41 -2
  28. openai_sdk_helpers/response/runner.py +1 -48
  29. openai_sdk_helpers/response/tool_call.py +0 -141
  30. openai_sdk_helpers/response/vector_store.py +8 -5
  31. openai_sdk_helpers/streamlit_app/app.py +1 -9
  32. openai_sdk_helpers/structure/__init__.py +16 -0
  33. openai_sdk_helpers/structure/base.py +239 -278
  34. openai_sdk_helpers/structure/extraction.py +1228 -0
  35. openai_sdk_helpers/structure/plan/plan.py +0 -20
  36. openai_sdk_helpers/structure/plan/task.py +0 -33
  37. openai_sdk_helpers/structure/prompt.py +16 -0
  38. openai_sdk_helpers/structure/responses.py +2 -2
  39. openai_sdk_helpers/structure/web_search.py +0 -10
  40. openai_sdk_helpers/tools.py +346 -99
  41. openai_sdk_helpers/utils/__init__.py +7 -0
  42. openai_sdk_helpers/utils/json/base_model.py +315 -32
  43. openai_sdk_helpers/utils/langextract.py +194 -0
  44. openai_sdk_helpers/vector_storage/cleanup.py +7 -2
  45. openai_sdk_helpers/vector_storage/storage.py +37 -7
  46. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.1.dist-info}/METADATA +21 -6
  47. openai_sdk_helpers-0.5.1.dist-info/RECORD +95 -0
  48. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +0 -75
  49. openai_sdk_helpers-0.4.3.dist-info/RECORD +0 -86
  50. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.1.dist-info}/WHEEL +0 -0
  51. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.1.dist-info}/entry_points.txt +0 -0
  52. {openai_sdk_helpers-0.4.3.dist-info → openai_sdk_helpers-0.5.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,37 @@
1
+ Build a DocumentExtractorConfig using the details below:
2
+ Name: {{ name }}
3
+ Prompt description: {{ prompt_description }}
4
+ Extraction classes:
5
+ {% for item in extraction_classes -%}
6
+ - {{ item }}
7
+ {% else -%}
8
+ - None provided
9
+ {% endfor %}
10
+
11
+ Example requirements:
12
+ {% for requirement in example_requirements -%}
13
+ - {{ requirement }}
14
+ {% endfor %}
15
+
16
+ Attributes guidance:
17
+ - Every extraction must include an "attributes" object.
18
+ - Use attributes to capture meaningful structured details (e.g., confidence, type, qualifiers).
19
+ - If no attributes apply, provide an empty object {}.
20
+
21
+ Examples (JSON):
22
+ {{ examples_json }}
23
+
24
+ Source files for grounding:
25
+ {% if example_files -%}
26
+ {% for file in example_files -%}
27
+ - Path: {{ file.path }}
28
+ Content:
29
+ {{ file.content }}
30
+ {% endfor %}
31
+ {% else -%}
32
+ - None provided.
33
+ {% endif %}
34
+
35
+ Grounding requirements:
36
+ - Use source file content to craft example text when available.
37
+ - Prefer quoting or lightly paraphrasing source text over inventing details.
@@ -0,0 +1,9 @@
1
+ Generate a DocumentExtractorConfig using the provided inputs.
2
+ Requirements:
3
+ - Generate high-quality examples that match the prompt and extraction classes.
4
+ - Ensure examples include realistic source text and cover all extraction classes.
5
+ - Include meaningful attributes on each extraction when applicable.
6
+ - If source files are provided, ground example text in that content.
7
+ - Set the configuration name exactly as provided.
8
+ - Preserve the provided prompt description and extraction classes.
9
+ - Do not add or remove extraction classes.
@@ -0,0 +1,4 @@
1
+ Optimize the extraction prompt for clarity and precision. Return the optimized prompt
2
+ as structured output.
3
+
4
+ {{ prompt_schema }}
@@ -0,0 +1,11 @@
1
+ Optimize the extraction prompt using the details below:
2
+ User prompt: {{ prompt }}
3
+ Extraction classes:
4
+ {% for item in extraction_classes -%}
5
+ - {{ item }}
6
+ {% else -%}
7
+ - None provided
8
+ {% endfor %}
9
+ {% if additional_context -%}
10
+ Additional context: {{ additional_context }}
11
+ {% endif %}
@@ -24,8 +24,6 @@ run_sync
24
24
  Execute a response workflow synchronously with resource cleanup.
25
25
  run_async
26
26
  Execute a response workflow asynchronously with resource cleanup.
27
- run_streamed
28
- Execute a response workflow and return the asynchronous result.
29
27
  attach_vector_store
30
28
  Attach vector stores to a response's file_search tool.
31
29
  process_files
@@ -38,8 +36,8 @@ from .base import ResponseBase
38
36
  from .configuration import ResponseConfiguration, ResponseRegistry, get_default_registry
39
37
  from .files import process_files
40
38
  from .messages import ResponseMessage, ResponseMessages
41
- from .runner import run_async, run_streamed, run_sync
42
- from .tool_call import ResponseToolCall, parse_tool_arguments
39
+ from .runner import run_async, run_sync
40
+ from .tool_call import ResponseToolCall
43
41
  from .vector_store import attach_vector_store
44
42
 
45
43
  __all__ = [
@@ -51,9 +49,7 @@ __all__ = [
51
49
  "ResponseMessages",
52
50
  "run_sync",
53
51
  "run_async",
54
- "run_streamed",
55
52
  "ResponseToolCall",
56
- "parse_tool_arguments",
57
53
  "attach_vector_store",
58
54
  "process_files",
59
55
  ]
@@ -13,6 +13,7 @@ import inspect
13
13
  import json
14
14
  import logging
15
15
  import threading
16
+ import traceback
16
17
  import uuid
17
18
  from pathlib import Path
18
19
  from typing import (
@@ -46,6 +47,7 @@ from .messages import ResponseMessage, ResponseMessages
46
47
  from ..settings import OpenAISettings
47
48
  from ..structure import StructureBase
48
49
  from ..types import OpenAIClient
50
+ from ..tools import ToolHandlerRegistration, ToolSpec
49
51
  from ..utils import (
50
52
  check_filepath,
51
53
  coerce_jsonable,
@@ -58,7 +60,6 @@ if TYPE_CHECKING: # pragma: no cover - only for typing hints
58
60
  from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
59
61
 
60
62
  T = TypeVar("T", bound=StructureBase)
61
- ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]
62
63
  RB = TypeVar("RB", bound="ResponseBase[StructureBase]")
63
64
 
64
65
 
@@ -81,8 +82,9 @@ class ResponseBase(Generic[T]):
81
82
  and naming vector stores.
82
83
  instructions : str
83
84
  System instructions provided to the OpenAI API for context.
84
- tools : list or None
85
- Tool definitions for the OpenAI API request. Pass None for no tools.
85
+ tools : list[ToolHandlerRegistration] or None
86
+ Tool handler registrations for the OpenAI API request. Pass None for
87
+ no tools.
86
88
  output_structure : type[StructureBase] or None
87
89
  Structure class used to parse tool call outputs. When provided,
88
90
  the schema is automatically generated using the structure's
@@ -93,8 +95,9 @@ class ResponseBase(Generic[T]):
93
95
  Optional absolute directory path for storing artifacts. If not provided,
94
96
  defaults to get_data_path(class_name). Session files are saved as
95
97
  data_path / uuid.json.
96
- tool_handlers : dict[str, ToolHandler] or None, default None
97
- Mapping from tool names to callable handlers. Each handler receives
98
+ tool_handlers : dict[str, ToolHandlerRegistration] or None, default None
99
+ Mapping from tool names to handler registrations that include optional
100
+ ToolSpec metadata to parse tool outputs by name. Each handler receives
98
101
  a ResponseFunctionToolCall and returns a string or any serializable
99
102
  result. Defaults to an empty dict when not provided.
100
103
  openai_settings : OpenAISettings or None, default None
@@ -119,12 +122,12 @@ class ResponseBase(Generic[T]):
119
122
 
120
123
  Methods
121
124
  -------
122
- run_async(content, attachments=None)
125
+ run_async(content, files=None, use_vector_store=False)
123
126
  Generate a response asynchronously and return parsed output.
124
- run_sync(content, attachments=None)
127
+ run_sync(content, files=None, use_vector_store=False)
125
128
  Execute run_async synchronously with thread management.
126
- run_streamed(content, attachments=None)
127
- Execute run_async and await the result (streaming not yet supported).
129
+ register_tool(func, tool_spec)
130
+ Register a tool handler and definition from a ToolSpec.
128
131
  get_last_tool_message()
129
132
  Return the most recent tool message or None.
130
133
  get_last_user_message()
@@ -135,6 +138,8 @@ class ResponseBase(Generic[T]):
135
138
  Construct a StreamlitAppConfig using this class as the builder.
136
139
  save(filepath=None)
137
140
  Serialize the message history to a JSON file.
141
+ save_error(exc)
142
+ Persist error details to a file named with the response UUID.
138
143
  close()
139
144
  Clean up remote resources including vector stores.
140
145
 
@@ -158,11 +163,11 @@ class ResponseBase(Generic[T]):
158
163
  *,
159
164
  name: str,
160
165
  instructions: str,
161
- tools: list | None,
166
+ tools: list[ToolHandlerRegistration] | None,
162
167
  output_structure: type[T] | None,
163
168
  system_vector_store: list[str] | None = None,
164
169
  data_path: Path | str | None = None,
165
- tool_handlers: dict[str, ToolHandler] | None = None,
170
+ tool_handlers: dict[str, ToolHandlerRegistration] | None = None,
166
171
  openai_settings: OpenAISettings | None = None,
167
172
  ) -> None:
168
173
  """Initialize a response session with OpenAI configuration.
@@ -178,8 +183,9 @@ class ResponseBase(Generic[T]):
178
183
  and naming vector stores.
179
184
  instructions : str
180
185
  System instructions provided to the OpenAI API for context.
181
- tools : list or None
182
- Tool definitions for the OpenAI API request. Pass None for no tools.
186
+ tools : list[ToolHandlerRegistration] or None
187
+ Tool handler registrations for the OpenAI API request. Pass None for
188
+ no tools.
183
189
  output_structure : type[StructureBase] or None
184
190
  Structure class used to parse tool call outputs. When provided,
185
191
  the schema is automatically generated using the structure's
@@ -190,8 +196,9 @@ class ResponseBase(Generic[T]):
190
196
  Optional absolute directory path for storing artifacts. If not provided,
191
197
  defaults to get_data_path(class_name). Session files are saved as
192
198
  data_path / uuid.json.
193
- tool_handlers : dict[str, ToolHandler] or None, default None
194
- Mapping from tool names to callable handlers. Each handler receives
199
+ tool_handlers : dict[str, ToolHandlerRegistration] or None, default None
200
+ Mapping from tool names to handler registrations that include optional
201
+ ToolSpec metadata to parse tool outputs by name. Each handler receives
195
202
  a ResponseFunctionToolCall and returns a string or any serializable
196
203
  result. Defaults to an empty dict when not provided.
197
204
  openai_settings : OpenAISettings or None, default None
@@ -222,9 +229,7 @@ class ResponseBase(Generic[T]):
222
229
  if openai_settings is None:
223
230
  raise ValueError("openai_settings is required")
224
231
 
225
- if tool_handlers is None:
226
- tool_handlers = {}
227
- self._tool_handlers = tool_handlers
232
+ self._tool_handlers = tool_handlers or {}
228
233
  self.uuid = uuid.uuid4()
229
234
  self._name = name
230
235
 
@@ -242,8 +247,14 @@ class ResponseBase(Generic[T]):
242
247
  self._data_path = get_data_path(self.__class__.__name__)
243
248
 
244
249
  self._instructions = instructions
245
- self._tools = tools if tools is not None else []
250
+ self._tools: list[dict[str, Any]] | None = None
251
+ if tools is not None:
252
+ self._tools = [
253
+ tool_handler.tool_spec.as_tool_definition() for tool_handler in tools
254
+ ]
255
+
246
256
  self._output_structure = output_structure
257
+ self._system_vector_store = system_vector_store
247
258
  self._openai_settings = openai_settings
248
259
 
249
260
  if not self._openai_settings.api_key:
@@ -339,7 +350,7 @@ class ResponseBase(Generic[T]):
339
350
  return self._instructions
340
351
 
341
352
  @property
342
- def tools(self) -> list | None:
353
+ def tools(self) -> list[dict[str, Any]] | None:
343
354
  """Return the tool definitions for this response.
344
355
 
345
356
  Returns
@@ -370,6 +381,43 @@ class ResponseBase(Generic[T]):
370
381
  """
371
382
  return self._output_structure
372
383
 
384
+ def register_tool(
385
+ self,
386
+ func: Callable[..., Any],
387
+ *,
388
+ tool_spec: ToolSpec,
389
+ ) -> None:
390
+ """Register a tool handler and definition using a ToolSpec.
391
+
392
+ Parameters
393
+ ----------
394
+ func : Callable[..., Any]
395
+ Tool implementation function to wrap for argument parsing and
396
+ result serialization.
397
+ tool_spec : ToolSpec
398
+ Tool specification describing input/output structures and metadata.
399
+
400
+ Returns
401
+ -------
402
+ None
403
+ Register the tool handler and definition for this response session.
404
+
405
+ Raises
406
+ ------
407
+ ValueError
408
+ If tool_spec.tool_name is empty.
409
+
410
+ Examples
411
+ --------
412
+ >>> response.register_tool(run_search, tool_spec=search_tool_spec)
413
+ """
414
+ if not tool_spec.tool_name:
415
+ raise ValueError("tool_spec.tool_name must be a non-empty string")
416
+ self._tool_handlers[tool_spec.tool_name] = ToolHandlerRegistration(
417
+ handler=func,
418
+ tool_spec=tool_spec,
419
+ )
420
+
373
421
  def _build_input(
374
422
  self,
375
423
  content: str | list[str],
@@ -455,7 +503,7 @@ class ResponseBase(Generic[T]):
455
503
  content: str | list[str],
456
504
  files: str | list[str] | None = None,
457
505
  use_vector_store: bool = False,
458
- ) -> T | None:
506
+ ) -> T | str:
459
507
  """Generate a response asynchronously from the OpenAI API.
460
508
 
461
509
  Builds input messages, sends the request to OpenAI, processes any
@@ -480,8 +528,8 @@ class ResponseBase(Generic[T]):
480
528
 
481
529
  Returns
482
530
  -------
483
- T or None
484
- Parsed response object of type output_structure, or None if
531
+ T or str
532
+ Parsed response object of type output_structure, or raw string if
485
533
  no structured output was produced.
486
534
 
487
535
  Raises
@@ -510,91 +558,117 @@ class ResponseBase(Generic[T]):
510
558
  log(f"{self.__class__.__name__}::run_response")
511
559
  parsed_result: T | None = None
512
560
 
513
- self._build_input(
514
- content=content,
515
- files=(ensure_list(files) if files else None),
516
- use_vector_store=use_vector_store,
517
- )
518
-
519
- kwargs = {
520
- "input": self.messages.to_openai_payload(),
521
- "model": self._model,
522
- }
523
- if not self._tools and self._output_structure is not None:
524
- kwargs["text"] = self._output_structure.response_format()
525
-
526
- if self._tools:
527
- kwargs["tools"] = self._tools
528
- kwargs["tool_choice"] = "auto"
529
- response = self._client.responses.create(**kwargs)
561
+ try:
562
+ self._build_input(
563
+ content=content,
564
+ files=(ensure_list(files) if files else None),
565
+ use_vector_store=use_vector_store,
566
+ )
530
567
 
531
- if not response.output:
532
- log("No output returned from OpenAI.", level=logging.ERROR)
533
- raise RuntimeError("No output returned from OpenAI.")
568
+ kwargs = {
569
+ "input": self.messages.to_openai_payload(),
570
+ "model": self._model,
571
+ }
572
+ if not self._tools and self._output_structure is not None:
573
+ kwargs["text"] = self._output_structure.response_format()
534
574
 
535
- for response_output in response.output:
536
- if isinstance(response_output, ResponseFunctionToolCall):
537
- log(
538
- f"Tool call detected. Executing {response_output.name}.",
539
- level=logging.INFO,
540
- )
575
+ if self._tools:
576
+ kwargs["tools"] = self._tools
577
+ kwargs["tool_choice"] = "auto"
578
+ response = self._client.responses.create(**kwargs)
541
579
 
542
- tool_name = response_output.name
543
- handler = self._tool_handlers.get(tool_name)
580
+ if not response.output:
581
+ log("No output returned from OpenAI.", level=logging.ERROR)
582
+ raise RuntimeError("No output returned from OpenAI.")
544
583
 
545
- if handler is None:
584
+ for response_output in response.output:
585
+ if isinstance(response_output, ResponseFunctionToolCall):
546
586
  log(
547
- f"No handler found for tool '{tool_name}'",
548
- level=logging.ERROR,
587
+ f"Tool call detected. Executing {response_output.name}.",
588
+ level=logging.INFO,
549
589
  )
550
- raise ValueError(f"No handler for tool: {tool_name}")
551
590
 
552
- try:
553
- if inspect.iscoroutinefunction(handler):
554
- tool_result_json = await handler(response_output)
555
- else:
556
- tool_result_json = handler(response_output)
557
- if isinstance(tool_result_json, str):
558
- tool_result = json.loads(tool_result_json)
559
- tool_output = tool_result_json
591
+ tool_name = response_output.name
592
+ registration = self._tool_handlers.get(tool_name)
593
+
594
+ if registration is None:
595
+ log(
596
+ f"No handler found for tool '{tool_name}'",
597
+ level=logging.ERROR,
598
+ )
599
+ raise ValueError(f"No handler for tool: {tool_name}")
600
+
601
+ handler = registration.handler
602
+ tool_spec = registration.tool_spec
603
+
604
+ try:
605
+ if inspect.iscoroutinefunction(handler):
606
+ tool_result_json = await handler(response_output)
607
+ else:
608
+ tool_result_json = handler(response_output)
609
+ if isinstance(tool_result_json, str):
610
+ tool_result = json.loads(tool_result_json)
611
+ tool_output = tool_result_json
612
+ else:
613
+ tool_result = coerce_jsonable(tool_result_json)
614
+ tool_output = json.dumps(tool_result, cls=customJSONEncoder)
615
+ self.messages.add_tool_message(
616
+ content=response_output, output=tool_output
617
+ )
618
+ self.save()
619
+ except Exception as exc:
620
+ log(
621
+ f"Error executing tool handler '{tool_name}': {exc}",
622
+ level=logging.ERROR,
623
+ exc=exc,
624
+ )
625
+ raise RuntimeError(
626
+ f"Error in tool handler '{tool_name}': {exc}"
627
+ )
628
+
629
+ if tool_spec is not None:
630
+ output_dict = tool_spec.output_structure.from_json(tool_result)
631
+ parsed_result = cast(T, output_dict)
632
+ elif self._output_structure:
633
+ output_dict = self._output_structure.from_json(tool_result)
634
+ parsed_result = output_dict
560
635
  else:
561
- tool_result = coerce_jsonable(tool_result_json)
562
- tool_output = json.dumps(tool_result, cls=customJSONEncoder)
563
- self.messages.add_tool_message(
564
- content=response_output, output=tool_output
636
+ print(tool_result)
637
+ parsed_result = cast(T, tool_result)
638
+
639
+ if isinstance(response_output, ResponseOutputMessage):
640
+ self.messages.add_assistant_message(
641
+ response_output, metadata=kwargs
565
642
  )
566
643
  self.save()
567
- except Exception as exc:
568
- log(
569
- f"Error executing tool handler '{tool_name}': {exc}",
570
- level=logging.ERROR,
571
- )
572
- raise RuntimeError(f"Error in tool handler '{tool_name}': {exc}")
573
-
574
- if self._output_structure:
575
- output_dict = self._output_structure.from_raw_input(tool_result)
576
- output_dict.console_print()
577
- parsed_result = output_dict
578
- else:
579
- print(tool_result)
580
- parsed_result = cast(T, tool_result)
581
-
582
- if isinstance(response_output, ResponseOutputMessage):
583
- self.messages.add_assistant_message(response_output, metadata=kwargs)
584
- self.save()
585
- if hasattr(response, "output_text") and response.output_text:
586
- raw_text = response.output_text
587
- log("No tool call. Parsing output_text.")
588
- try:
589
- output_dict = json.loads(raw_text)
590
- if self._output_structure:
591
- return self._output_structure.from_raw_input(output_dict)
592
- return output_dict
593
- except Exception:
594
- print(raw_text)
595
- if parsed_result is not None:
596
- return parsed_result
597
- return None
644
+ if hasattr(response, "output_text") and response.output_text:
645
+ raw_text = response.output_text
646
+ log("No tool call. Parsing output_text.")
647
+ try:
648
+ output_dict = json.loads(raw_text)
649
+ if self._output_structure:
650
+ return self._output_structure.from_json(output_dict)
651
+ return output_dict
652
+ except Exception:
653
+ print(raw_text)
654
+ if parsed_result is not None:
655
+ return parsed_result
656
+ return response.output_text
657
+ except Exception as exc:
658
+ try:
659
+ self.save_error(exc)
660
+ except Exception as save_exc:
661
+ log(
662
+ f"Failed to save error details for response {self.uuid}: {save_exc}",
663
+ level=logging.ERROR,
664
+ exc=save_exc,
665
+ )
666
+ log(
667
+ f"Error running response '{self._name}': {exc}",
668
+ level=logging.ERROR,
669
+ exc=exc,
670
+ )
671
+ raise
598
672
 
599
673
  def run_sync(
600
674
  self,
@@ -602,7 +676,7 @@ class ResponseBase(Generic[T]):
602
676
  *,
603
677
  files: str | list[str] | None = None,
604
678
  use_vector_store: bool = False,
605
- ) -> T | None:
679
+ ) -> T | str:
606
680
  """Execute run_async synchronously with proper event loop handling.
607
681
 
608
682
  Automatically detects if an event loop is already running and uses
@@ -627,8 +701,8 @@ class ResponseBase(Generic[T]):
627
701
 
628
702
  Returns
629
703
  -------
630
- T or None
631
- Parsed response object of type output_structure, or None.
704
+ T or str
705
+ Parsed response object of type output_structure, or raw string.
632
706
 
633
707
  Raises
634
708
  ------
@@ -654,7 +728,7 @@ class ResponseBase(Generic[T]):
654
728
  ... )
655
729
  """
656
730
 
657
- async def runner() -> T | None:
731
+ async def runner() -> T | str:
658
732
  return await self.run_async(
659
733
  content=content,
660
734
  files=files,
@@ -665,7 +739,7 @@ class ResponseBase(Generic[T]):
665
739
  asyncio.get_running_loop()
666
740
  except RuntimeError:
667
741
  return asyncio.run(runner())
668
- result: T | None = None
742
+ result: T | str = ""
669
743
 
670
744
  def _thread_func() -> None:
671
745
  nonlocal result
@@ -676,65 +750,6 @@ class ResponseBase(Generic[T]):
676
750
  thread.join()
677
751
  return result
678
752
 
679
- def run_streamed(
680
- self,
681
- content: str | list[str],
682
- *,
683
- files: str | list[str] | None = None,
684
- use_vector_store: bool = False,
685
- ) -> T | None:
686
- """Execute run_async and await the result.
687
-
688
- Streaming responses are not yet fully supported, so this method
689
- simply awaits run_async to provide API compatibility with agent
690
- interfaces.
691
-
692
- Automatically detects file types:
693
- - Images are sent as base64-encoded images
694
- - Documents are sent as base64-encoded files (default)
695
- - Documents can optionally use vector stores for RAG
696
-
697
- Parameters
698
- ----------
699
- content : str or list[str]
700
- Prompt text or list of prompt texts to send.
701
- files : str, list[str], or None, default None
702
- Optional file path or list of file paths. Each file is
703
- automatically processed based on its type.
704
- use_vector_store : bool, default False
705
- If True, non-image files are uploaded to a vector store
706
- for RAG-enabled search instead of inline base64 encoding.
707
-
708
- Returns
709
- -------
710
- T or None
711
- Parsed response object of type output_structure, or None.
712
-
713
- Raises
714
- ------
715
- RuntimeError
716
- If the API returns no output.
717
- If a tool handler raises an exception.
718
- ValueError
719
- If the API invokes a tool with no registered handler.
720
-
721
- Notes
722
- -----
723
- This method exists for API consistency but does not currently
724
- provide true streaming functionality.
725
-
726
- Examples
727
- --------
728
- >>> result = response.run_streamed("Analyze these files")
729
- """
730
- return asyncio.run(
731
- self.run_async(
732
- content=content,
733
- files=files,
734
- use_vector_store=use_vector_store,
735
- )
736
- )
737
-
738
753
  def get_last_tool_message(self) -> ResponseMessage | None:
739
754
  """Return the most recent tool message from conversation history.
740
755
 
@@ -874,6 +889,36 @@ class ResponseBase(Generic[T]):
874
889
  self.messages.to_json_file(str(checked))
875
890
  log(f"Saved messages to {target}")
876
891
 
892
+ def save_error(self, exc: BaseException) -> Path:
893
+ """Persist error details to a file named with the response UUID.
894
+
895
+ Parameters
896
+ ----------
897
+ exc : BaseException
898
+ Exception instance to serialize.
899
+
900
+ Returns
901
+ -------
902
+ Path
903
+ Path to the error file written to disk.
904
+
905
+ Examples
906
+ --------
907
+ >>> try:
908
+ ... response.run_sync("trigger error")
909
+ ... except Exception as exc:
910
+ ... response.save_error(exc)
911
+ """
912
+ error_text = "".join(
913
+ traceback.format_exception(type(exc), exc, exc.__traceback__)
914
+ )
915
+ filename = f"{str(self.uuid).lower()}_error.txt"
916
+ target = self._data_path / self._name / filename
917
+ checked = check_filepath(filepath=target)
918
+ checked.write_text(error_text, encoding="utf-8")
919
+ log(f"Saved error details to {checked}")
920
+ return checked
921
+
877
922
  def __repr__(self) -> str:
878
923
  """Return a detailed string representation of the response session.
879
924
 
@@ -945,7 +990,19 @@ class ResponseBase(Generic[T]):
945
990
  f"Files API cleanup: {successful}/{len(cleanup_results)} files deleted"
946
991
  )
947
992
  except Exception as exc:
948
- log(f"Error cleaning up Files API uploads: {exc}", level=logging.WARNING)
993
+ try:
994
+ self.save_error(exc)
995
+ except Exception as save_exc:
996
+ log(
997
+ f"Failed to save error details for response {self.uuid}: {save_exc}",
998
+ level=logging.ERROR,
999
+ exc=save_exc,
1000
+ )
1001
+ log(
1002
+ f"Error cleaning up Files API uploads: {exc}",
1003
+ level=logging.WARNING,
1004
+ exc=exc,
1005
+ )
949
1006
 
950
1007
  # Always clean user vector storage if it exists
951
1008
  try:
@@ -953,6 +1010,18 @@ class ResponseBase(Generic[T]):
953
1010
  self._user_vector_storage.delete()
954
1011
  log("User vector store deleted.")
955
1012
  except Exception as exc:
956
- log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
1013
+ try:
1014
+ self.save_error(exc)
1015
+ except Exception as save_exc:
1016
+ log(
1017
+ f"Failed to save error details for response {self.uuid}: {save_exc}",
1018
+ level=logging.ERROR,
1019
+ exc=save_exc,
1020
+ )
1021
+ log(
1022
+ f"Error deleting user vector store: {exc}",
1023
+ level=logging.WARNING,
1024
+ exc=exc,
1025
+ )
957
1026
  # System vector store cleanup is now handled via tool configuration
958
1027
  log(f"Session {self.uuid} closed.")