openai-sdk-helpers 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +85 -10
- openai_sdk_helpers/agent/__init__.py +8 -4
- openai_sdk_helpers/agent/base.py +81 -46
- openai_sdk_helpers/agent/config.py +6 -4
- openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
- openai_sdk_helpers/agent/prompt_utils.py +7 -1
- openai_sdk_helpers/agent/runner.py +67 -141
- openai_sdk_helpers/agent/search/__init__.py +33 -0
- openai_sdk_helpers/agent/search/base.py +297 -0
- openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
- openai_sdk_helpers/agent/{web_search.py → search/web.py} +82 -162
- openai_sdk_helpers/agent/summarizer.py +29 -8
- openai_sdk_helpers/agent/translator.py +40 -13
- openai_sdk_helpers/agent/validation.py +32 -8
- openai_sdk_helpers/async_utils.py +132 -0
- openai_sdk_helpers/config.py +74 -36
- openai_sdk_helpers/context_manager.py +241 -0
- openai_sdk_helpers/enums/__init__.py +9 -1
- openai_sdk_helpers/enums/base.py +67 -8
- openai_sdk_helpers/environment.py +33 -6
- openai_sdk_helpers/errors.py +133 -0
- openai_sdk_helpers/logging_config.py +105 -0
- openai_sdk_helpers/prompt/__init__.py +10 -71
- openai_sdk_helpers/prompt/base.py +172 -0
- openai_sdk_helpers/response/__init__.py +37 -5
- openai_sdk_helpers/response/base.py +427 -189
- openai_sdk_helpers/response/config.py +176 -0
- openai_sdk_helpers/response/messages.py +104 -40
- openai_sdk_helpers/response/runner.py +79 -35
- openai_sdk_helpers/response/tool_call.py +75 -12
- openai_sdk_helpers/response/vector_store.py +29 -16
- openai_sdk_helpers/retry.py +175 -0
- openai_sdk_helpers/streamlit_app/__init__.py +30 -0
- openai_sdk_helpers/streamlit_app/app.py +345 -0
- openai_sdk_helpers/streamlit_app/config.py +502 -0
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +68 -0
- openai_sdk_helpers/structure/__init__.py +69 -3
- openai_sdk_helpers/structure/agent_blueprint.py +82 -19
- openai_sdk_helpers/structure/base.py +245 -91
- openai_sdk_helpers/structure/plan/__init__.py +15 -1
- openai_sdk_helpers/structure/plan/enum.py +41 -5
- openai_sdk_helpers/structure/plan/plan.py +101 -45
- openai_sdk_helpers/structure/plan/task.py +38 -6
- openai_sdk_helpers/structure/prompt.py +21 -2
- openai_sdk_helpers/structure/responses.py +52 -11
- openai_sdk_helpers/structure/summary.py +55 -7
- openai_sdk_helpers/structure/validation.py +34 -6
- openai_sdk_helpers/structure/vector_search.py +132 -18
- openai_sdk_helpers/structure/web_search.py +128 -12
- openai_sdk_helpers/types.py +57 -0
- openai_sdk_helpers/utils/__init__.py +32 -1
- openai_sdk_helpers/utils/core.py +200 -32
- openai_sdk_helpers/validation.py +302 -0
- openai_sdk_helpers/vector_storage/__init__.py +21 -1
- openai_sdk_helpers/vector_storage/cleanup.py +25 -13
- openai_sdk_helpers/vector_storage/storage.py +124 -66
- openai_sdk_helpers/vector_storage/types.py +20 -19
- openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
- openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
- openai_sdk_helpers-0.0.7.dist-info/METADATA +0 -193
- openai_sdk_helpers-0.0.7.dist-info/RECORD +0 -51
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
"""Module defining the ResponseConfiguration dataclass for managing OpenAI SDK responses."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Generic, Optional, Sequence, Type, TypeVar
|
|
8
|
+
from openai.types.responses.response_text_config_param import ResponseTextConfigParam
|
|
9
|
+
|
|
10
|
+
from ..config import OpenAISettings
|
|
11
|
+
from ..structure.base import BaseStructure
|
|
12
|
+
from ..response.base import BaseResponse, ToolHandler
|
|
13
|
+
|
|
14
|
+
TIn = TypeVar("TIn", bound="BaseStructure")
|
|
15
|
+
TOut = TypeVar("TOut", bound="BaseStructure")
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass(frozen=True, slots=True)
|
|
19
|
+
class ResponseConfiguration(Generic[TIn, TOut]):
|
|
20
|
+
"""
|
|
21
|
+
Represent an immutable configuration describing input and output structures.
|
|
22
|
+
|
|
23
|
+
Encapsulate all metadata required to define how a request is interpreted and
|
|
24
|
+
how a response is structured, while enforcing strict type and runtime safety.
|
|
25
|
+
|
|
26
|
+
Parameters
|
|
27
|
+
----------
|
|
28
|
+
name : str
|
|
29
|
+
Unique configuration identifier. Must be a non-empty string.
|
|
30
|
+
instructions : str or Path
|
|
31
|
+
Plain text instructions or a path to a Jinja template file whose
|
|
32
|
+
contents are loaded at runtime.
|
|
33
|
+
tools : Sequence[object], optional
|
|
34
|
+
Tool definitions associated with the configuration. Default is None.
|
|
35
|
+
input_structure : Type[BaseStructure], optional
|
|
36
|
+
Structure class used to parse or validate input. Must subclass
|
|
37
|
+
BaseStructure. Default is None.
|
|
38
|
+
output_structure : Type[BaseStructure], optional
|
|
39
|
+
Structure class used to format or validate output. Schema is
|
|
40
|
+
automatically generated from this structure. Must subclass
|
|
41
|
+
BaseStructure. Default is None.
|
|
42
|
+
|
|
43
|
+
Raises
|
|
44
|
+
------
|
|
45
|
+
TypeError
|
|
46
|
+
If name is not a non-empty string.
|
|
47
|
+
If instructions is not a string or Path.
|
|
48
|
+
If tools is provided and is not a sequence.
|
|
49
|
+
If input_structure or output_structure is not a class.
|
|
50
|
+
If input_structure or output_structure does not subclass BaseStructure.
|
|
51
|
+
ValueError
|
|
52
|
+
If instructions is a string that is empty or only whitespace.
|
|
53
|
+
FileNotFoundError
|
|
54
|
+
If instructions is a Path that does not point to a readable file.
|
|
55
|
+
|
|
56
|
+
Methods
|
|
57
|
+
-------
|
|
58
|
+
__post_init__()
|
|
59
|
+
Validate configuration invariants and enforce BaseStructure subclassing.
|
|
60
|
+
instructions_text
|
|
61
|
+
Return the resolved instruction content as a string.
|
|
62
|
+
|
|
63
|
+
Examples
|
|
64
|
+
--------
|
|
65
|
+
>>> config = Configuration(
|
|
66
|
+
... name="targeting_to_plan",
|
|
67
|
+
... tools=None,
|
|
68
|
+
... input_structure=PromptStructure,
|
|
69
|
+
... output_structure=WebSearchStructure,
|
|
70
|
+
... )
|
|
71
|
+
>>> config.name
|
|
72
|
+
'prompt_to_websearch'
|
|
73
|
+
"""
|
|
74
|
+
|
|
75
|
+
name: str
|
|
76
|
+
instructions: str | Path
|
|
77
|
+
tools: Optional[list]
|
|
78
|
+
input_structure: Optional[Type[TIn]]
|
|
79
|
+
output_structure: Optional[Type[TOut]]
|
|
80
|
+
|
|
81
|
+
def __post_init__(self) -> None:
|
|
82
|
+
"""
|
|
83
|
+
Validate configuration invariants after initialization.
|
|
84
|
+
|
|
85
|
+
Enforce non-empty naming, correct typing of structures, and ensure that
|
|
86
|
+
any declared structure subclasses BaseStructure.
|
|
87
|
+
|
|
88
|
+
Raises
|
|
89
|
+
------
|
|
90
|
+
TypeError
|
|
91
|
+
If name is not a non-empty string.
|
|
92
|
+
If tools is provided and is not a sequence.
|
|
93
|
+
If input_structure or output_structure is not a class.
|
|
94
|
+
If input_structure or output_structure does not subclass BaseStructure.
|
|
95
|
+
"""
|
|
96
|
+
if not self.name or not isinstance(self.name, str):
|
|
97
|
+
raise TypeError("Configuration.name must be a non-empty str")
|
|
98
|
+
|
|
99
|
+
instructions_value = self.instructions
|
|
100
|
+
if isinstance(instructions_value, str):
|
|
101
|
+
if not instructions_value.strip():
|
|
102
|
+
raise ValueError("Configuration.instructions must be a non-empty str")
|
|
103
|
+
elif isinstance(instructions_value, Path):
|
|
104
|
+
instruction_path = instructions_value.expanduser()
|
|
105
|
+
if not instruction_path.is_file():
|
|
106
|
+
raise FileNotFoundError(
|
|
107
|
+
f"Instruction template not found: {instruction_path}"
|
|
108
|
+
)
|
|
109
|
+
else:
|
|
110
|
+
raise TypeError("Configuration.instructions must be a str or Path")
|
|
111
|
+
|
|
112
|
+
for attr in ("input_structure", "output_structure"):
|
|
113
|
+
cls = getattr(self, attr)
|
|
114
|
+
if cls is None:
|
|
115
|
+
continue
|
|
116
|
+
if not isinstance(cls, type):
|
|
117
|
+
raise TypeError(
|
|
118
|
+
f"Configuration.{attr} must be a class (Type[BaseStructure]) or None"
|
|
119
|
+
)
|
|
120
|
+
if not issubclass(cls, BaseStructure):
|
|
121
|
+
raise TypeError(f"Configuration.{attr} must subclass BaseStructure")
|
|
122
|
+
|
|
123
|
+
if self.tools is not None and not isinstance(self.tools, Sequence):
|
|
124
|
+
raise TypeError("Configuration.tools must be a Sequence or None")
|
|
125
|
+
|
|
126
|
+
@property
|
|
127
|
+
def instructions_text(self) -> str:
|
|
128
|
+
"""Return the resolved instruction text.
|
|
129
|
+
|
|
130
|
+
Returns
|
|
131
|
+
-------
|
|
132
|
+
str
|
|
133
|
+
Plain-text instructions, loading template files when necessary.
|
|
134
|
+
"""
|
|
135
|
+
return self._resolve_instructions()
|
|
136
|
+
|
|
137
|
+
def _resolve_instructions(self) -> str:
|
|
138
|
+
if isinstance(self.instructions, Path):
|
|
139
|
+
instruction_path = self.instructions.expanduser()
|
|
140
|
+
try:
|
|
141
|
+
return instruction_path.read_text(encoding="utf-8")
|
|
142
|
+
except OSError as exc:
|
|
143
|
+
raise ValueError(
|
|
144
|
+
f"Unable to read instructions at '{instruction_path}': {exc}"
|
|
145
|
+
) from exc
|
|
146
|
+
return self.instructions
|
|
147
|
+
|
|
148
|
+
def gen_response(
|
|
149
|
+
self,
|
|
150
|
+
openai_settings: OpenAISettings,
|
|
151
|
+
tool_handlers: dict[str, ToolHandler] = {},
|
|
152
|
+
) -> BaseResponse[TOut]:
|
|
153
|
+
"""Generate a BaseResponse instance based on the configuration.
|
|
154
|
+
|
|
155
|
+
Parameters
|
|
156
|
+
----------
|
|
157
|
+
openai_settings : OpenAISettings
|
|
158
|
+
Authentication and model settings applied to the generated
|
|
159
|
+
:class:`BaseResponse`.
|
|
160
|
+
tool_handlers : dict[str, Callable], optional
|
|
161
|
+
Mapping of tool names to handler callables. Defaults to an empty
|
|
162
|
+
dictionary when not provided.
|
|
163
|
+
|
|
164
|
+
Returns
|
|
165
|
+
-------
|
|
166
|
+
BaseResponse[TOut]
|
|
167
|
+
An instance of BaseResponse configured with ``openai_settings``.
|
|
168
|
+
"""
|
|
169
|
+
return BaseResponse[TOut](
|
|
170
|
+
name=self.name,
|
|
171
|
+
instructions=self.instructions_text,
|
|
172
|
+
tools=self.tools,
|
|
173
|
+
output_structure=self.output_structure,
|
|
174
|
+
tool_handlers=tool_handlers,
|
|
175
|
+
openai_settings=openai_settings,
|
|
176
|
+
)
|
|
@@ -1,10 +1,15 @@
|
|
|
1
|
-
"""Message containers for
|
|
1
|
+
"""Message containers for OpenAI response conversations.
|
|
2
|
+
|
|
3
|
+
This module provides dataclasses for managing conversation history including
|
|
4
|
+
user inputs, assistant outputs, system messages, and tool calls. Messages are
|
|
5
|
+
stored with timestamps and metadata, and can be serialized to JSON.
|
|
6
|
+
"""
|
|
2
7
|
|
|
3
8
|
from __future__ import annotations
|
|
4
9
|
|
|
5
10
|
from dataclasses import dataclass, field
|
|
6
11
|
from datetime import datetime, timezone
|
|
7
|
-
from typing import
|
|
12
|
+
from typing import cast
|
|
8
13
|
|
|
9
14
|
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
|
|
10
15
|
from openai.types.responses.response_function_tool_call_param import (
|
|
@@ -25,12 +30,26 @@ from .tool_call import ResponseToolCall
|
|
|
25
30
|
|
|
26
31
|
@dataclass
|
|
27
32
|
class ResponseMessage(JSONSerializable):
|
|
28
|
-
"""Single message exchanged with the OpenAI
|
|
33
|
+
"""Single message exchanged with the OpenAI API.
|
|
34
|
+
|
|
35
|
+
Represents a complete message with role, content, timestamp, and
|
|
36
|
+
optional metadata. Can be serialized to JSON for persistence.
|
|
37
|
+
|
|
38
|
+
Attributes
|
|
39
|
+
----------
|
|
40
|
+
role : str
|
|
41
|
+
Message role: "user", "assistant", "tool", or "system".
|
|
42
|
+
content : ResponseInputItemParam | ResponseOutputMessage | ResponseFunctionToolCallParam | FunctionCallOutput | ResponseInputMessageContentListParam
|
|
43
|
+
Message content in OpenAI format.
|
|
44
|
+
timestamp : datetime
|
|
45
|
+
UTC timestamp when the message was created.
|
|
46
|
+
metadata : dict[str, str | float | bool]
|
|
47
|
+
Optional metadata for tracking or debugging.
|
|
29
48
|
|
|
30
49
|
Methods
|
|
31
50
|
-------
|
|
32
51
|
to_openai_format()
|
|
33
|
-
Return the
|
|
52
|
+
Return the message content in OpenAI API format.
|
|
34
53
|
"""
|
|
35
54
|
|
|
36
55
|
role: str # "user", "assistant", "tool", etc.
|
|
@@ -42,7 +61,7 @@ class ResponseMessage(JSONSerializable):
|
|
|
42
61
|
| ResponseInputMessageContentListParam
|
|
43
62
|
)
|
|
44
63
|
timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
|
|
45
|
-
metadata:
|
|
64
|
+
metadata: dict[str, str | float | bool] = field(default_factory=dict)
|
|
46
65
|
|
|
47
66
|
def to_openai_format(
|
|
48
67
|
self,
|
|
@@ -65,10 +84,16 @@ class ResponseMessage(JSONSerializable):
|
|
|
65
84
|
|
|
66
85
|
@dataclass
|
|
67
86
|
class ResponseMessages(JSONSerializable):
|
|
68
|
-
"""
|
|
87
|
+
"""Collection of messages in a conversation.
|
|
69
88
|
|
|
70
|
-
|
|
71
|
-
|
|
89
|
+
Manages the complete history of messages exchanged during an OpenAI
|
|
90
|
+
API interaction. Provides methods for adding different message types
|
|
91
|
+
and converting to formats required by the OpenAI API.
|
|
92
|
+
|
|
93
|
+
Attributes
|
|
94
|
+
----------
|
|
95
|
+
messages : list[ResponseMessage]
|
|
96
|
+
Ordered list of all messages in the conversation.
|
|
72
97
|
|
|
73
98
|
Methods
|
|
74
99
|
-------
|
|
@@ -81,10 +106,16 @@ class ResponseMessages(JSONSerializable):
|
|
|
81
106
|
add_tool_message(content, output, **metadata)
|
|
82
107
|
Record a tool call and its output.
|
|
83
108
|
to_openai_payload()
|
|
84
|
-
Convert stored messages to
|
|
109
|
+
Convert stored messages to OpenAI input payload format.
|
|
110
|
+
get_last_assistant_message()
|
|
111
|
+
Return the most recent assistant message or None.
|
|
112
|
+
get_last_tool_message()
|
|
113
|
+
Return the most recent tool message or None.
|
|
114
|
+
get_last_user_message()
|
|
115
|
+
Return the most recent user message or None.
|
|
85
116
|
"""
|
|
86
117
|
|
|
87
|
-
messages:
|
|
118
|
+
messages: list[ResponseMessage] = field(default_factory=list)
|
|
88
119
|
|
|
89
120
|
def add_system_message(
|
|
90
121
|
self, content: ResponseInputMessageContentListParam, **metadata
|
|
@@ -97,10 +128,6 @@ class ResponseMessages(JSONSerializable):
|
|
|
97
128
|
System message content in OpenAI format.
|
|
98
129
|
**metadata
|
|
99
130
|
Optional metadata to store with the message.
|
|
100
|
-
|
|
101
|
-
Returns
|
|
102
|
-
-------
|
|
103
|
-
None
|
|
104
131
|
"""
|
|
105
132
|
response_input = cast(
|
|
106
133
|
ResponseInputItemParam, {"role": "system", "content": content}
|
|
@@ -120,10 +147,6 @@ class ResponseMessages(JSONSerializable):
|
|
|
120
147
|
Message payload supplied by the user.
|
|
121
148
|
**metadata
|
|
122
149
|
Optional metadata to store with the message.
|
|
123
|
-
|
|
124
|
-
Returns
|
|
125
|
-
-------
|
|
126
|
-
None
|
|
127
150
|
"""
|
|
128
151
|
self.messages.append(
|
|
129
152
|
ResponseMessage(role="user", content=input_content, metadata=metadata)
|
|
@@ -132,20 +155,16 @@ class ResponseMessages(JSONSerializable):
|
|
|
132
155
|
def add_assistant_message(
|
|
133
156
|
self,
|
|
134
157
|
content: ResponseOutputMessage,
|
|
135
|
-
metadata:
|
|
158
|
+
metadata: dict[str, str | float | bool],
|
|
136
159
|
) -> None:
|
|
137
160
|
"""Append an assistant message to the conversation.
|
|
138
161
|
|
|
139
162
|
Parameters
|
|
140
163
|
----------
|
|
141
164
|
content : ResponseOutputMessage
|
|
142
|
-
Assistant response message.
|
|
143
|
-
metadata : dict[str,
|
|
165
|
+
Assistant response message from the OpenAI API.
|
|
166
|
+
metadata : dict[str, str | float | bool]
|
|
144
167
|
Optional metadata to store with the message.
|
|
145
|
-
|
|
146
|
-
Returns
|
|
147
|
-
-------
|
|
148
|
-
None
|
|
149
168
|
"""
|
|
150
169
|
self.messages.append(
|
|
151
170
|
ResponseMessage(role="assistant", content=content, metadata=metadata)
|
|
@@ -154,20 +173,16 @@ class ResponseMessages(JSONSerializable):
|
|
|
154
173
|
def add_tool_message(
|
|
155
174
|
self, content: ResponseFunctionToolCall, output: str, **metadata
|
|
156
175
|
) -> None:
|
|
157
|
-
"""Record a tool call and its output in the conversation
|
|
176
|
+
"""Record a tool call and its output in the conversation.
|
|
158
177
|
|
|
159
178
|
Parameters
|
|
160
179
|
----------
|
|
161
180
|
content : ResponseFunctionToolCall
|
|
162
|
-
Tool call received from OpenAI.
|
|
181
|
+
Tool call received from the OpenAI API.
|
|
163
182
|
output : str
|
|
164
|
-
JSON string returned by the executed tool.
|
|
183
|
+
JSON string returned by the executed tool handler.
|
|
165
184
|
**metadata
|
|
166
185
|
Optional metadata to store with the message.
|
|
167
|
-
|
|
168
|
-
Returns
|
|
169
|
-
-------
|
|
170
|
-
None
|
|
171
186
|
"""
|
|
172
187
|
tool_call = ResponseToolCall(
|
|
173
188
|
call_id=content.call_id,
|
|
@@ -187,25 +202,74 @@ class ResponseMessages(JSONSerializable):
|
|
|
187
202
|
|
|
188
203
|
def to_openai_payload(
    self,
) -> list[
    ResponseInputItemParam
    | ResponseOutputMessage
    | ResponseFunctionToolCallParam
    | FunctionCallOutput
    | ResponseInputMessageContentListParam
]:
    """Convert stored messages to OpenAI API input format.

    Returns
    -------
    list
        Message payloads suitable for the OpenAI API.
        Assistant messages are excluded as they are outputs, not inputs.

    Notes
    -----
    Assistant messages represent model outputs rather than inputs for the
    next request, so they are filtered out of the returned payload.
    """
    non_assistant = (msg for msg in self.messages if msg.role != "assistant")
    return [msg.to_openai_format() for msg in non_assistant]
|
|
228
|
+
|
|
229
|
+
def _get_last_message(self, role: str) -> ResponseMessage | None:
|
|
230
|
+
"""Return the most recent message for the given role.
|
|
231
|
+
|
|
232
|
+
Parameters
|
|
233
|
+
----------
|
|
234
|
+
role : str
|
|
235
|
+
Role name to filter messages by.
|
|
236
|
+
|
|
237
|
+
Returns
|
|
238
|
+
-------
|
|
239
|
+
ResponseMessage or None
|
|
240
|
+
Latest message matching ``role`` or ``None`` when absent.
|
|
241
|
+
"""
|
|
242
|
+
for message in reversed(self.messages):
|
|
243
|
+
if message.role == role:
|
|
244
|
+
return message
|
|
245
|
+
return None
|
|
246
|
+
|
|
247
|
+
def get_last_assistant_message(self) -> ResponseMessage | None:
|
|
248
|
+
"""Return the most recent assistant message.
|
|
249
|
+
|
|
250
|
+
Returns
|
|
251
|
+
-------
|
|
252
|
+
ResponseMessage or None
|
|
253
|
+
Latest assistant message or ``None`` when absent.
|
|
254
|
+
"""
|
|
255
|
+
return self._get_last_message(role="assistant")
|
|
256
|
+
|
|
257
|
+
def get_last_tool_message(self) -> ResponseMessage | None:
|
|
258
|
+
"""Return the most recent tool message.
|
|
259
|
+
|
|
260
|
+
Returns
|
|
261
|
+
-------
|
|
262
|
+
ResponseMessage or None
|
|
263
|
+
Latest tool message or ``None`` when absent.
|
|
264
|
+
"""
|
|
265
|
+
return self._get_last_message(role="tool")
|
|
266
|
+
|
|
267
|
+
def get_last_user_message(self) -> ResponseMessage | None:
|
|
268
|
+
"""Return the most recent user message.
|
|
269
|
+
|
|
270
|
+
Returns
|
|
271
|
+
-------
|
|
272
|
+
ResponseMessage or None
|
|
273
|
+
Latest user message or ``None`` when absent.
|
|
274
|
+
"""
|
|
275
|
+
return self._get_last_message(role="user")
|
|
@@ -1,38 +1,55 @@
|
|
|
1
|
-
"""Convenience
|
|
1
|
+
"""Convenience functions for executing response workflows.
|
|
2
|
+
|
|
3
|
+
This module provides high-level functions that handle the complete lifecycle
|
|
4
|
+
of response workflows including instantiation, execution, and resource cleanup.
|
|
5
|
+
They simplify common usage patterns for both synchronous and asynchronous contexts.
|
|
6
|
+
"""
|
|
2
7
|
|
|
3
8
|
from __future__ import annotations
|
|
4
9
|
|
|
5
10
|
import asyncio
|
|
11
|
+
from typing import Any, TypeVar
|
|
6
12
|
|
|
7
|
-
from
|
|
8
|
-
|
|
9
|
-
from .base import ResponseBase
|
|
13
|
+
from .base import BaseResponse
|
|
10
14
|
|
|
11
15
|
|
|
12
|
-
R = TypeVar("R", bound=
|
|
16
|
+
R = TypeVar("R", bound=BaseResponse[Any])
|
|
13
17
|
|
|
14
18
|
|
|
15
19
|
def run_sync(
|
|
16
|
-
response_cls:
|
|
20
|
+
response_cls: type[R],
|
|
17
21
|
*,
|
|
18
22
|
content: str,
|
|
19
|
-
response_kwargs:
|
|
23
|
+
response_kwargs: dict[str, Any] | None = None,
|
|
20
24
|
) -> Any:
|
|
21
|
-
"""
|
|
25
|
+
"""Execute a response workflow synchronously with automatic cleanup.
|
|
26
|
+
|
|
27
|
+
Instantiates the response class, executes run_sync with the provided
|
|
28
|
+
content, and ensures cleanup occurs even if an exception is raised.
|
|
22
29
|
|
|
23
30
|
Parameters
|
|
24
31
|
----------
|
|
25
|
-
response_cls
|
|
26
|
-
Response class to instantiate.
|
|
27
|
-
content
|
|
32
|
+
response_cls : type[BaseResponse]
|
|
33
|
+
Response class to instantiate for the workflow.
|
|
34
|
+
content : str
|
|
28
35
|
Prompt text to send to the OpenAI API.
|
|
29
|
-
response_kwargs
|
|
30
|
-
|
|
36
|
+
response_kwargs : dict[str, Any] or None, default None
|
|
37
|
+
Optional keyword arguments forwarded to response_cls constructor.
|
|
31
38
|
|
|
32
39
|
Returns
|
|
33
40
|
-------
|
|
34
41
|
Any
|
|
35
|
-
Parsed response from
|
|
42
|
+
Parsed response from BaseResponse.run_sync, typically a structured
|
|
43
|
+
output or None.
|
|
44
|
+
|
|
45
|
+
Examples
|
|
46
|
+
--------
|
|
47
|
+
>>> from openai_sdk_helpers.response import run_sync
|
|
48
|
+
>>> result = run_sync(
|
|
49
|
+
... MyResponse,
|
|
50
|
+
... content="Analyze this text",
|
|
51
|
+
... response_kwargs={"openai_settings": settings}
|
|
52
|
+
... )
|
|
36
53
|
"""
|
|
37
54
|
response = response_cls(**(response_kwargs or {}))
|
|
38
55
|
try:
|
|
@@ -42,26 +59,39 @@ def run_sync(
|
|
|
42
59
|
|
|
43
60
|
|
|
44
61
|
async def run_async(
|
|
45
|
-
response_cls:
|
|
62
|
+
response_cls: type[R],
|
|
46
63
|
*,
|
|
47
64
|
content: str,
|
|
48
|
-
response_kwargs:
|
|
65
|
+
response_kwargs: dict[str, Any] | None = None,
|
|
49
66
|
) -> Any:
|
|
50
|
-
"""
|
|
67
|
+
"""Execute a response workflow asynchronously with automatic cleanup.
|
|
68
|
+
|
|
69
|
+
Instantiates the response class, executes run_async with the provided
|
|
70
|
+
content, and ensures cleanup occurs even if an exception is raised.
|
|
51
71
|
|
|
52
72
|
Parameters
|
|
53
73
|
----------
|
|
54
|
-
response_cls
|
|
55
|
-
Response class to instantiate.
|
|
56
|
-
content
|
|
74
|
+
response_cls : type[BaseResponse]
|
|
75
|
+
Response class to instantiate for the workflow.
|
|
76
|
+
content : str
|
|
57
77
|
Prompt text to send to the OpenAI API.
|
|
58
|
-
response_kwargs
|
|
59
|
-
|
|
78
|
+
response_kwargs : dict[str, Any] or None, default None
|
|
79
|
+
Optional keyword arguments forwarded to response_cls constructor.
|
|
60
80
|
|
|
61
81
|
Returns
|
|
62
82
|
-------
|
|
63
83
|
Any
|
|
64
|
-
Parsed response from
|
|
84
|
+
Parsed response from BaseResponse.run_async, typically a structured
|
|
85
|
+
output or None.
|
|
86
|
+
|
|
87
|
+
Examples
|
|
88
|
+
--------
|
|
89
|
+
>>> from openai_sdk_helpers.response import run_async
|
|
90
|
+
>>> result = await run_async(
|
|
91
|
+
... MyResponse,
|
|
92
|
+
... content="Summarize this document",
|
|
93
|
+
... response_kwargs={"openai_settings": settings}
|
|
94
|
+
... )
|
|
65
95
|
"""
|
|
66
96
|
response = response_cls(**(response_kwargs or {}))
|
|
67
97
|
try:
|
|
@@ -71,30 +101,44 @@ async def run_async(
|
|
|
71
101
|
|
|
72
102
|
|
|
73
103
|
def run_streamed(
|
|
74
|
-
response_cls:
|
|
104
|
+
response_cls: type[R],
|
|
75
105
|
*,
|
|
76
106
|
content: str,
|
|
77
|
-
response_kwargs:
|
|
107
|
+
response_kwargs: dict[str, Any] | None = None,
|
|
78
108
|
) -> Any:
|
|
79
|
-
"""
|
|
109
|
+
"""Execute a response workflow and return the awaited result.
|
|
80
110
|
|
|
81
|
-
|
|
82
|
-
currently supported
|
|
83
|
-
|
|
111
|
+
Provides API compatibility with agent interfaces. Streaming responses
|
|
112
|
+
are not currently fully supported, so this executes run_async and
|
|
113
|
+
awaits the result.
|
|
84
114
|
|
|
85
115
|
Parameters
|
|
86
116
|
----------
|
|
87
|
-
response_cls
|
|
88
|
-
Response class to instantiate.
|
|
89
|
-
content
|
|
117
|
+
response_cls : type[BaseResponse]
|
|
118
|
+
Response class to instantiate for the workflow.
|
|
119
|
+
content : str
|
|
90
120
|
Prompt text to send to the OpenAI API.
|
|
91
|
-
response_kwargs
|
|
92
|
-
|
|
121
|
+
response_kwargs : dict[str, Any] or None, default None
|
|
122
|
+
Optional keyword arguments forwarded to response_cls constructor.
|
|
93
123
|
|
|
94
124
|
Returns
|
|
95
125
|
-------
|
|
96
126
|
Any
|
|
97
|
-
Parsed response
|
|
127
|
+
Parsed response from run_async, typically a structured output or None.
|
|
128
|
+
|
|
129
|
+
Notes
|
|
130
|
+
-----
|
|
131
|
+
This function exists for API consistency but does not currently provide
|
|
132
|
+
true streaming functionality.
|
|
133
|
+
|
|
134
|
+
Examples
|
|
135
|
+
--------
|
|
136
|
+
>>> from openai_sdk_helpers.response import run_streamed
|
|
137
|
+
>>> result = run_streamed(
|
|
138
|
+
... MyResponse,
|
|
139
|
+
... content="Process this text",
|
|
140
|
+
... response_kwargs={"openai_settings": settings}
|
|
141
|
+
... )
|
|
98
142
|
"""
|
|
99
143
|
return asyncio.run(
|
|
100
144
|
run_async(response_cls, content=content, response_kwargs=response_kwargs)
|