openai-sdk-helpers 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64) hide show
  1. openai_sdk_helpers/__init__.py +66 -2
  2. openai_sdk_helpers/agent/__init__.py +8 -4
  3. openai_sdk_helpers/agent/base.py +80 -45
  4. openai_sdk_helpers/agent/config.py +6 -4
  5. openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
  6. openai_sdk_helpers/agent/prompt_utils.py +7 -1
  7. openai_sdk_helpers/agent/runner.py +67 -141
  8. openai_sdk_helpers/agent/search/__init__.py +33 -0
  9. openai_sdk_helpers/agent/search/base.py +297 -0
  10. openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
  11. openai_sdk_helpers/agent/{web_search.py → search/web.py} +77 -156
  12. openai_sdk_helpers/agent/summarizer.py +29 -8
  13. openai_sdk_helpers/agent/translator.py +40 -13
  14. openai_sdk_helpers/agent/validation.py +32 -8
  15. openai_sdk_helpers/async_utils.py +132 -0
  16. openai_sdk_helpers/config.py +74 -36
  17. openai_sdk_helpers/context_manager.py +241 -0
  18. openai_sdk_helpers/enums/__init__.py +9 -1
  19. openai_sdk_helpers/enums/base.py +67 -8
  20. openai_sdk_helpers/environment.py +33 -6
  21. openai_sdk_helpers/errors.py +133 -0
  22. openai_sdk_helpers/logging_config.py +105 -0
  23. openai_sdk_helpers/prompt/__init__.py +10 -71
  24. openai_sdk_helpers/prompt/base.py +172 -0
  25. openai_sdk_helpers/response/__init__.py +35 -3
  26. openai_sdk_helpers/response/base.py +363 -210
  27. openai_sdk_helpers/response/config.py +176 -0
  28. openai_sdk_helpers/response/messages.py +56 -40
  29. openai_sdk_helpers/response/runner.py +77 -33
  30. openai_sdk_helpers/response/tool_call.py +49 -25
  31. openai_sdk_helpers/response/vector_store.py +27 -14
  32. openai_sdk_helpers/retry.py +175 -0
  33. openai_sdk_helpers/streamlit_app/__init__.py +19 -2
  34. openai_sdk_helpers/streamlit_app/app.py +114 -39
  35. openai_sdk_helpers/streamlit_app/config.py +502 -0
  36. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +5 -6
  37. openai_sdk_helpers/structure/__init__.py +69 -3
  38. openai_sdk_helpers/structure/agent_blueprint.py +82 -19
  39. openai_sdk_helpers/structure/base.py +208 -93
  40. openai_sdk_helpers/structure/plan/__init__.py +15 -1
  41. openai_sdk_helpers/structure/plan/enum.py +41 -5
  42. openai_sdk_helpers/structure/plan/plan.py +101 -45
  43. openai_sdk_helpers/structure/plan/task.py +38 -6
  44. openai_sdk_helpers/structure/prompt.py +21 -2
  45. openai_sdk_helpers/structure/responses.py +52 -11
  46. openai_sdk_helpers/structure/summary.py +55 -7
  47. openai_sdk_helpers/structure/validation.py +34 -6
  48. openai_sdk_helpers/structure/vector_search.py +132 -18
  49. openai_sdk_helpers/structure/web_search.py +125 -13
  50. openai_sdk_helpers/types.py +57 -0
  51. openai_sdk_helpers/utils/__init__.py +30 -1
  52. openai_sdk_helpers/utils/core.py +168 -34
  53. openai_sdk_helpers/validation.py +302 -0
  54. openai_sdk_helpers/vector_storage/__init__.py +21 -1
  55. openai_sdk_helpers/vector_storage/cleanup.py +25 -13
  56. openai_sdk_helpers/vector_storage/storage.py +123 -64
  57. openai_sdk_helpers/vector_storage/types.py +20 -19
  58. openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
  59. openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
  60. openai_sdk_helpers/streamlit_app/configuration.py +0 -324
  61. openai_sdk_helpers-0.0.8.dist-info/METADATA +0 -194
  62. openai_sdk_helpers-0.0.8.dist-info/RECORD +0 -55
  63. {openai_sdk_helpers-0.0.8.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
  64. {openai_sdk_helpers-0.0.8.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
"""Module defining the ResponseConfiguration dataclass for managing OpenAI SDK responses."""

from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import Generic, Optional, Sequence, Type, TypeVar

from openai.types.responses.response_text_config_param import ResponseTextConfigParam

from ..config import OpenAISettings
from ..structure.base import BaseStructure
from ..response.base import BaseResponse, ToolHandler

TIn = TypeVar("TIn", bound="BaseStructure")
TOut = TypeVar("TOut", bound="BaseStructure")


@dataclass(frozen=True, slots=True)
class ResponseConfiguration(Generic[TIn, TOut]):
    """
    Represent an immutable configuration describing input and output structures.

    Encapsulate all metadata required to define how a request is interpreted and
    how a response is structured, while enforcing strict type and runtime safety.

    Parameters
    ----------
    name : str
        Unique configuration identifier. Must be a non-empty string.
    instructions : str or Path
        Plain text instructions or a path to a Jinja template file whose
        contents are loaded at runtime.
    tools : Sequence[object], optional
        Tool definitions associated with the configuration. Default is None.
    input_structure : Type[BaseStructure], optional
        Structure class used to parse or validate input. Must subclass
        BaseStructure. Default is None.
    output_structure : Type[BaseStructure], optional
        Structure class used to format or validate output. Schema is
        automatically generated from this structure. Must subclass
        BaseStructure. Default is None.

    Raises
    ------
    TypeError
        If name is not a non-empty string.
        If instructions is not a string or Path.
        If tools is provided and is not a (non-string) sequence.
        If input_structure or output_structure is not a class.
        If input_structure or output_structure does not subclass BaseStructure.
    ValueError
        If instructions is a string that is empty or only whitespace.
    FileNotFoundError
        If instructions is a Path that does not point to a readable file.

    Methods
    -------
    __post_init__()
        Validate configuration invariants and enforce BaseStructure subclassing.
    instructions_text
        Return the resolved instruction content as a string.
    gen_response(openai_settings, tool_handlers=None)
        Build a BaseResponse instance from this configuration.

    Examples
    --------
    >>> config = ResponseConfiguration(
    ...     name="prompt_to_websearch",
    ...     instructions="Turn the prompt into a web search plan.",
    ...     tools=None,
    ...     input_structure=PromptStructure,
    ...     output_structure=WebSearchStructure,
    ... )
    >>> config.name
    'prompt_to_websearch'
    """

    name: str
    instructions: str | Path
    tools: Optional[list]
    input_structure: Optional[Type[TIn]]
    output_structure: Optional[Type[TOut]]

    def __post_init__(self) -> None:
        """
        Validate configuration invariants after initialization.

        Enforce non-empty naming, correct typing of structures, and ensure that
        any declared structure subclasses BaseStructure.

        Raises
        ------
        TypeError
            If name is not a non-empty string.
            If tools is provided and is not a (non-string) sequence.
            If input_structure or output_structure is not a class.
            If input_structure or output_structure does not subclass BaseStructure.
        ValueError
            If instructions is a string that is empty or only whitespace.
        FileNotFoundError
            If instructions is a Path that does not point to a readable file.
        """
        if not self.name or not isinstance(self.name, str):
            raise TypeError("ResponseConfiguration.name must be a non-empty str")

        instructions_value = self.instructions
        if isinstance(instructions_value, str):
            if not instructions_value.strip():
                raise ValueError(
                    "ResponseConfiguration.instructions must be a non-empty str"
                )
        elif isinstance(instructions_value, Path):
            # Resolve "~" before checking existence so user-relative template
            # paths validate the same way they will be read later.
            instruction_path = instructions_value.expanduser()
            if not instruction_path.is_file():
                raise FileNotFoundError(
                    f"Instruction template not found: {instruction_path}"
                )
        else:
            raise TypeError(
                "ResponseConfiguration.instructions must be a str or Path"
            )

        for attr in ("input_structure", "output_structure"):
            cls = getattr(self, attr)
            if cls is None:
                continue
            if not isinstance(cls, type):
                raise TypeError(
                    f"ResponseConfiguration.{attr} must be a class "
                    "(Type[BaseStructure]) or None"
                )
            if not issubclass(cls, BaseStructure):
                raise TypeError(
                    f"ResponseConfiguration.{attr} must subclass BaseStructure"
                )

        # str/bytes are Sequences but are never a valid tool collection, so
        # reject them explicitly rather than silently iterating characters.
        if self.tools is not None and (
            isinstance(self.tools, (str, bytes))
            or not isinstance(self.tools, Sequence)
        ):
            raise TypeError(
                "ResponseConfiguration.tools must be a Sequence or None"
            )

    @property
    def instructions_text(self) -> str:
        """Return the resolved instruction text.

        Returns
        -------
        str
            Plain-text instructions, loading template files when necessary.

        Raises
        ------
        ValueError
            If a template file exists at validation time but cannot be read.
        """
        return self._resolve_instructions()

    def _resolve_instructions(self) -> str:
        # Load template contents lazily so the dataclass stays cheap to build;
        # plain-string instructions are returned as-is.
        if isinstance(self.instructions, Path):
            instruction_path = self.instructions.expanduser()
            try:
                return instruction_path.read_text(encoding="utf-8")
            except OSError as exc:
                raise ValueError(
                    f"Unable to read instructions at '{instruction_path}': {exc}"
                ) from exc
        return self.instructions

    def gen_response(
        self,
        openai_settings: OpenAISettings,
        tool_handlers: Optional[dict[str, ToolHandler]] = None,
    ) -> BaseResponse[TOut]:
        """Generate a BaseResponse instance based on the configuration.

        Parameters
        ----------
        openai_settings : OpenAISettings
            Authentication and model settings applied to the generated
            :class:`BaseResponse`.
        tool_handlers : dict[str, ToolHandler] or None, default None
            Mapping of tool names to handler callables. A fresh empty dict is
            used when not provided (avoids the shared mutable-default pitfall).

        Returns
        -------
        BaseResponse[TOut]
            An instance of BaseResponse configured with ``openai_settings``.
        """
        return BaseResponse[TOut](
            name=self.name,
            instructions=self.instructions_text,
            tools=self.tools,
            output_structure=self.output_structure,
            tool_handlers=tool_handlers if tool_handlers is not None else {},
            openai_settings=openai_settings,
        )
@@ -1,10 +1,15 @@
1
- """Message containers for shared OpenAI responses."""
1
+ """Message containers for OpenAI response conversations.
2
+
3
+ This module provides dataclasses for managing conversation history including
4
+ user inputs, assistant outputs, system messages, and tool calls. Messages are
5
+ stored with timestamps and metadata, and can be serialized to JSON.
6
+ """
2
7
 
3
8
  from __future__ import annotations
4
9
 
5
10
  from dataclasses import dataclass, field
6
11
  from datetime import datetime, timezone
7
- from typing import Dict, List, Union, cast
12
+ from typing import cast
8
13
 
9
14
  from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
10
15
  from openai.types.responses.response_function_tool_call_param import (
@@ -25,12 +30,26 @@ from .tool_call import ResponseToolCall
25
30
 
26
31
  @dataclass
27
32
  class ResponseMessage(JSONSerializable):
28
- """Single message exchanged with the OpenAI client.
33
+ """Single message exchanged with the OpenAI API.
34
+
35
+ Represents a complete message with role, content, timestamp, and
36
+ optional metadata. Can be serialized to JSON for persistence.
37
+
38
+ Attributes
39
+ ----------
40
+ role : str
41
+ Message role: "user", "assistant", "tool", or "system".
42
+ content : ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
43
+ Message content in OpenAI format.
44
+ timestamp : datetime
45
+ UTC timestamp when the message was created.
46
+ metadata : dict[str, str | float | bool]
47
+ Optional metadata for tracking or debugging.
29
48
 
30
49
  Methods
31
50
  -------
32
51
  to_openai_format()
33
- Return the payload in the format expected by the OpenAI client.
52
+ Return the message content in OpenAI API format.
34
53
  """
35
54
 
36
55
  role: str # "user", "assistant", "tool", etc.
@@ -42,7 +61,7 @@ class ResponseMessage(JSONSerializable):
42
61
  | ResponseInputMessageContentListParam
43
62
  )
44
63
  timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
45
- metadata: Dict[str, Union[str, float, bool]] = field(default_factory=dict)
64
+ metadata: dict[str, str | float | bool] = field(default_factory=dict)
46
65
 
47
66
  def to_openai_format(
48
67
  self,
@@ -65,10 +84,16 @@ class ResponseMessage(JSONSerializable):
65
84
 
66
85
  @dataclass
67
86
  class ResponseMessages(JSONSerializable):
68
- """Represent a collection of messages in a response.
87
+ """Collection of messages in a conversation.
88
+
89
+ Manages the complete history of messages exchanged during an OpenAI
90
+ API interaction. Provides methods for adding different message types
91
+ and converting to formats required by the OpenAI API.
69
92
 
70
- This dataclass encapsulates user inputs and assistant outputs during an
71
- OpenAI API interaction.
93
+ Attributes
94
+ ----------
95
+ messages : list[ResponseMessage]
96
+ Ordered list of all messages in the conversation.
72
97
 
73
98
  Methods
74
99
  -------
@@ -81,10 +106,16 @@ class ResponseMessages(JSONSerializable):
81
106
  add_tool_message(content, output, **metadata)
82
107
  Record a tool call and its output.
83
108
  to_openai_payload()
84
- Convert stored messages to the OpenAI input payload.
109
+ Convert stored messages to OpenAI input payload format.
110
+ get_last_assistant_message()
111
+ Return the most recent assistant message or None.
112
+ get_last_tool_message()
113
+ Return the most recent tool message or None.
114
+ get_last_user_message()
115
+ Return the most recent user message or None.
85
116
  """
86
117
 
87
- messages: List[ResponseMessage] = field(default_factory=list)
118
+ messages: list[ResponseMessage] = field(default_factory=list)
88
119
 
89
120
  def add_system_message(
90
121
  self, content: ResponseInputMessageContentListParam, **metadata
@@ -97,10 +128,6 @@ class ResponseMessages(JSONSerializable):
97
128
  System message content in OpenAI format.
98
129
  **metadata
99
130
  Optional metadata to store with the message.
100
-
101
- Returns
102
- -------
103
- None
104
131
  """
105
132
  response_input = cast(
106
133
  ResponseInputItemParam, {"role": "system", "content": content}
@@ -120,10 +147,6 @@ class ResponseMessages(JSONSerializable):
120
147
  Message payload supplied by the user.
121
148
  **metadata
122
149
  Optional metadata to store with the message.
123
-
124
- Returns
125
- -------
126
- None
127
150
  """
128
151
  self.messages.append(
129
152
  ResponseMessage(role="user", content=input_content, metadata=metadata)
@@ -132,20 +155,16 @@ class ResponseMessages(JSONSerializable):
132
155
  def add_assistant_message(
133
156
  self,
134
157
  content: ResponseOutputMessage,
135
- metadata: Dict[str, Union[str, float, bool]],
158
+ metadata: dict[str, str | float | bool],
136
159
  ) -> None:
137
160
  """Append an assistant message to the conversation.
138
161
 
139
162
  Parameters
140
163
  ----------
141
164
  content : ResponseOutputMessage
142
- Assistant response message.
143
- metadata : dict[str, Union[str, float, bool]]
165
+ Assistant response message from the OpenAI API.
166
+ metadata : dict[str, str | float | bool]
144
167
  Optional metadata to store with the message.
145
-
146
- Returns
147
- -------
148
- None
149
168
  """
150
169
  self.messages.append(
151
170
  ResponseMessage(role="assistant", content=content, metadata=metadata)
@@ -154,20 +173,16 @@ class ResponseMessages(JSONSerializable):
154
173
  def add_tool_message(
155
174
  self, content: ResponseFunctionToolCall, output: str, **metadata
156
175
  ) -> None:
157
- """Record a tool call and its output in the conversation history.
176
+ """Record a tool call and its output in the conversation.
158
177
 
159
178
  Parameters
160
179
  ----------
161
180
  content : ResponseFunctionToolCall
162
- Tool call received from OpenAI.
181
+ Tool call received from the OpenAI API.
163
182
  output : str
164
- JSON string returned by the executed tool.
183
+ JSON string returned by the executed tool handler.
165
184
  **metadata
166
185
  Optional metadata to store with the message.
167
-
168
- Returns
169
- -------
170
- None
171
186
  """
172
187
  tool_call = ResponseToolCall(
173
188
  call_id=content.call_id,
@@ -187,24 +202,25 @@ class ResponseMessages(JSONSerializable):
187
202
 
188
203
  def to_openai_payload(
189
204
  self,
190
- ) -> List[
205
+ ) -> list[
191
206
  ResponseInputItemParam
192
207
  | ResponseOutputMessage
193
208
  | ResponseFunctionToolCallParam
194
209
  | FunctionCallOutput
195
210
  | ResponseInputMessageContentListParam
196
211
  ]:
197
- """Convert stored messages to the input payload expected by OpenAI.
198
-
199
- Notes
200
- -----
201
- Assistant messages are model outputs and are not included in the
202
- next request's input payload.
212
+ """Convert stored messages to OpenAI API input format.
203
213
 
204
214
  Returns
205
215
  -------
206
216
  list
207
- List of message payloads excluding assistant outputs.
217
+ List of message payloads suitable for the OpenAI API.
218
+ Assistant messages are excluded as they are outputs, not inputs.
219
+
220
+ Notes
221
+ -----
222
+ Assistant messages are not included in the returned payload since
223
+ they represent model outputs rather than inputs for the next request.
208
224
  """
209
225
  return [
210
226
  msg.to_openai_format() for msg in self.messages if msg.role != "assistant"
@@ -1,10 +1,14 @@
1
- """Convenience runners for response workflows."""
1
+ """Convenience functions for executing response workflows.
2
+
3
+ This module provides high-level functions that handle the complete lifecycle
4
+ of response workflows including instantiation, execution, and resource cleanup.
5
+ They simplify common usage patterns for both synchronous and asynchronous contexts.
6
+ """
2
7
 
3
8
  from __future__ import annotations
4
9
 
5
10
  import asyncio
6
-
7
- from typing import Any, Optional, Type, TypeVar
11
+ from typing import Any, TypeVar
8
12
 
9
13
  from .base import BaseResponse
10
14
 
@@ -13,26 +17,39 @@ R = TypeVar("R", bound=BaseResponse[Any])
13
17
 
14
18
 
15
19
  def run_sync(
16
- response_cls: Type[R],
20
+ response_cls: type[R],
17
21
  *,
18
22
  content: str,
19
- response_kwargs: Optional[dict[str, Any]] = None,
23
+ response_kwargs: dict[str, Any] | None = None,
20
24
  ) -> Any:
21
- """Run a response workflow synchronously and close resources.
25
+ """Execute a response workflow synchronously with automatic cleanup.
26
+
27
+ Instantiates the response class, executes run_sync with the provided
28
+ content, and ensures cleanup occurs even if an exception is raised.
22
29
 
23
30
  Parameters
24
31
  ----------
25
- response_cls
26
- Response class to instantiate.
27
- content
32
+ response_cls : type[BaseResponse]
33
+ Response class to instantiate for the workflow.
34
+ content : str
28
35
  Prompt text to send to the OpenAI API.
29
- response_kwargs
30
- Keyword arguments forwarded to ``response_cls``. Default ``None``.
36
+ response_kwargs : dict[str, Any] or None, default None
37
+ Optional keyword arguments forwarded to response_cls constructor.
31
38
 
32
39
  Returns
33
40
  -------
34
41
  Any
35
- Parsed response from :meth:`BaseResponse.run_response`.
42
+ Parsed response from BaseResponse.run_sync, typically a structured
43
+ output or None.
44
+
45
+ Examples
46
+ --------
47
+ >>> from openai_sdk_helpers.response import run_sync
48
+ >>> result = run_sync(
49
+ ... MyResponse,
50
+ ... content="Analyze this text",
51
+ ... response_kwargs={"openai_settings": settings}
52
+ ... )
36
53
  """
37
54
  response = response_cls(**(response_kwargs or {}))
38
55
  try:
@@ -42,26 +59,39 @@ def run_sync(
42
59
 
43
60
 
44
61
  async def run_async(
45
- response_cls: Type[R],
62
+ response_cls: type[R],
46
63
  *,
47
64
  content: str,
48
- response_kwargs: Optional[dict[str, Any]] = None,
65
+ response_kwargs: dict[str, Any] | None = None,
49
66
  ) -> Any:
50
- """Run a response workflow asynchronously and close resources.
67
+ """Execute a response workflow asynchronously with automatic cleanup.
68
+
69
+ Instantiates the response class, executes run_async with the provided
70
+ content, and ensures cleanup occurs even if an exception is raised.
51
71
 
52
72
  Parameters
53
73
  ----------
54
- response_cls
55
- Response class to instantiate.
56
- content
74
+ response_cls : type[BaseResponse]
75
+ Response class to instantiate for the workflow.
76
+ content : str
57
77
  Prompt text to send to the OpenAI API.
58
- response_kwargs
59
- Keyword arguments forwarded to ``response_cls``. Default ``None``.
78
+ response_kwargs : dict[str, Any] or None, default None
79
+ Optional keyword arguments forwarded to response_cls constructor.
60
80
 
61
81
  Returns
62
82
  -------
63
83
  Any
64
- Parsed response from :meth:`BaseResponse.run_response_async`.
84
+ Parsed response from BaseResponse.run_async, typically a structured
85
+ output or None.
86
+
87
+ Examples
88
+ --------
89
+ >>> from openai_sdk_helpers.response import run_async
90
+ >>> result = await run_async(
91
+ ... MyResponse,
92
+ ... content="Summarize this document",
93
+ ... response_kwargs={"openai_settings": settings}
94
+ ... )
65
95
  """
66
96
  response = response_cls(**(response_kwargs or {}))
67
97
  try:
@@ -71,30 +101,44 @@ async def run_async(
71
101
 
72
102
 
73
103
  def run_streamed(
74
- response_cls: Type[R],
104
+ response_cls: type[R],
75
105
  *,
76
106
  content: str,
77
- response_kwargs: Optional[dict[str, Any]] = None,
107
+ response_kwargs: dict[str, Any] | None = None,
78
108
  ) -> Any:
79
- """Run a response workflow and return the asynchronous result.
109
+ """Execute a response workflow and return the awaited result.
80
110
 
81
- This mirrors the agent API for discoverability. Streaming responses are not
82
- currently supported by :class:`BaseResponse`, so this returns the same value
83
- as :func:`run_async`.
111
+ Provides API compatibility with agent interfaces. Streaming responses
112
+ are not currently fully supported, so this executes run_async and
113
+ awaits the result.
84
114
 
85
115
  Parameters
86
116
  ----------
87
- response_cls
88
- Response class to instantiate.
89
- content
117
+ response_cls : type[BaseResponse]
118
+ Response class to instantiate for the workflow.
119
+ content : str
90
120
  Prompt text to send to the OpenAI API.
91
- response_kwargs
92
- Keyword arguments forwarded to ``response_cls``. Default ``None``.
121
+ response_kwargs : dict[str, Any] or None, default None
122
+ Optional keyword arguments forwarded to response_cls constructor.
93
123
 
94
124
  Returns
95
125
  -------
96
126
  Any
97
- Parsed response returned from :func:`run_async`.
127
+ Parsed response from run_async, typically a structured output or None.
128
+
129
+ Notes
130
+ -----
131
+ This function exists for API consistency but does not currently provide
132
+ true streaming functionality.
133
+
134
+ Examples
135
+ --------
136
+ >>> from openai_sdk_helpers.response import run_streamed
137
+ >>> result = run_streamed(
138
+ ... MyResponse,
139
+ ... content="Process this text",
140
+ ... response_kwargs={"openai_settings": settings}
141
+ ... )
98
142
  """
99
143
  return asyncio.run(
100
144
  run_async(response_cls, content=content, response_kwargs=response_kwargs)
@@ -1,11 +1,15 @@
1
- """Tool call representation for shared responses."""
1
+ """Tool call representation and argument parsing.
2
+
3
+ This module provides data structures and utilities for managing tool calls
4
+ in OpenAI response conversations, including conversion to OpenAI API formats
5
+ and robust argument parsing.
6
+ """
2
7
 
3
8
  from __future__ import annotations
4
9
 
5
- from dataclasses import dataclass
6
- from typing import Tuple
7
- import json
8
10
  import ast
11
+ import json
12
+ from dataclasses import dataclass
9
13
 
10
14
  from openai.types.responses.response_function_tool_call_param import (
11
15
  ResponseFunctionToolCallParam,
@@ -15,23 +19,27 @@ from openai.types.responses.response_input_param import FunctionCallOutput
15
19
 
16
20
  @dataclass
17
21
  class ResponseToolCall:
18
- """Container for tool call data used in a conversation.
22
+ """Container for tool call data in a conversation.
23
+
24
+ Stores the complete information about a tool invocation including
25
+ the call identifier, tool name, input arguments, and execution output.
26
+ Can convert to OpenAI API format for use in subsequent requests.
19
27
 
20
28
  Attributes
21
29
  ----------
22
30
  call_id : str
23
- Identifier of the tool call.
31
+ Unique identifier for this tool call.
24
32
  name : str
25
- Name of the tool invoked.
33
+ Name of the tool that was invoked.
26
34
  arguments : str
27
- JSON string with the arguments passed to the tool.
35
+ JSON string containing the arguments passed to the tool.
28
36
  output : str
29
- JSON string representing the result produced by the tool.
37
+ JSON string representing the result produced by the tool handler.
30
38
 
31
39
  Methods
32
40
  -------
33
41
  to_response_input_item_param()
34
- Convert stored data into OpenAI tool call objects.
42
+ Convert to OpenAI API tool call format.
35
43
  """
36
44
 
37
45
  call_id: str
@@ -41,14 +49,28 @@ class ResponseToolCall:
41
49
 
42
50
  def to_response_input_item_param(
43
51
  self,
44
- ) -> Tuple[ResponseFunctionToolCallParam, FunctionCallOutput]:
45
- """Convert stored data into OpenAI tool call objects.
52
+ ) -> tuple[ResponseFunctionToolCallParam, FunctionCallOutput]:
53
+ """Convert stored data into OpenAI API tool call objects.
54
+
55
+ Creates the function call parameter and corresponding output object
56
+ required by the OpenAI API for tool interaction.
46
57
 
47
58
  Returns
48
59
  -------
49
60
  tuple[ResponseFunctionToolCallParam, FunctionCallOutput]
50
- The function call object and the corresponding output object
51
- suitable for inclusion in an OpenAI request.
61
+ A two-element tuple containing:
62
+ - ResponseFunctionToolCallParam: The function call representation
63
+ - FunctionCallOutput: The function output representation
64
+
65
+ Examples
66
+ --------
67
+ >>> tool_call = ResponseToolCall(
68
+ ... call_id="call_123",
69
+ ... name="search",
70
+ ... arguments='{"query": "test"}',
71
+ ... output='{"results": []}'
72
+ ... )
73
+ >>> func_call, func_output = tool_call.to_response_input_item_param()
52
74
  """
53
75
  from typing import cast
54
76
 
@@ -73,32 +95,34 @@ class ResponseToolCall:
73
95
 
74
96
 
75
97
  def parse_tool_arguments(arguments: str) -> dict:
76
- """Parse tool call arguments which may not be valid JSON.
98
+ """Parse tool call arguments with fallback for malformed JSON.
77
99
 
78
- The OpenAI API is expected to return well-formed JSON for tool arguments,
79
- but minor formatting issues (such as the use of single quotes) can occur.
80
- This helper first tries ``json.loads`` and falls back to
81
- ``ast.literal_eval`` for simple cases.
100
+ Attempts to parse arguments as JSON first, then falls back to
101
+ ast.literal_eval for cases where the OpenAI API returns minor
102
+ formatting issues like single quotes instead of double quotes.
82
103
 
83
104
  Parameters
84
105
  ----------
85
- arguments
86
- Raw argument string from the tool call.
106
+ arguments : str
107
+ Raw argument string from a tool call, expected to be JSON.
87
108
 
88
109
  Returns
89
110
  -------
90
111
  dict
91
- Parsed dictionary of arguments.
112
+ Parsed dictionary of tool arguments.
92
113
 
93
114
  Raises
94
115
  ------
95
116
  ValueError
96
- If the arguments cannot be parsed as JSON.
117
+ If the arguments cannot be parsed as valid JSON or Python literal.
97
118
 
98
119
  Examples
99
120
  --------
100
- >>> parse_tool_arguments('{"key": "value"}')["key"]
101
- 'value'
121
+ >>> parse_tool_arguments('{"key": "value"}')
122
+ {'key': 'value'}
123
+
124
+ >>> parse_tool_arguments("{'key': 'value'}")
125
+ {'key': 'value'}
102
126
  """
103
127
  try:
104
128
  return json.loads(arguments)