openai-sdk-helpers 0.0.7__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
- openai_sdk_helpers/__init__.py +19 -8
- openai_sdk_helpers/agent/base.py +1 -1
- openai_sdk_helpers/agent/web_search.py +5 -6
- openai_sdk_helpers/response/__init__.py +2 -2
- openai_sdk_helpers/response/base.py +89 -4
- openai_sdk_helpers/response/messages.py +48 -0
- openai_sdk_helpers/response/runner.py +5 -5
- openai_sdk_helpers/response/tool_call.py +39 -0
- openai_sdk_helpers/response/vector_store.py +2 -2
- openai_sdk_helpers/streamlit_app/__init__.py +13 -0
- openai_sdk_helpers/streamlit_app/app.py +270 -0
- openai_sdk_helpers/streamlit_app/configuration.py +324 -0
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +69 -0
- openai_sdk_helpers/structure/base.py +40 -1
- openai_sdk_helpers/structure/web_search.py +4 -0
- openai_sdk_helpers/utils/__init__.py +2 -0
- openai_sdk_helpers/utils/core.py +34 -0
- openai_sdk_helpers/vector_storage/storage.py +16 -17
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.8.dist-info}/METADATA +2 -1
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.8.dist-info}/RECORD +22 -18
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.8.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.8.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/__init__.py
CHANGED
@@ -2,10 +2,23 @@
 
 from __future__ import annotations
 
-from .structure import
+from .structure import (
+    BaseStructure,
+    SchemaOptions,
+    PlanStructure,
+    TaskStructure,
+    WebSearchStructure,
+    VectorSearchStructure,
+    PromptStructure,
+    spec_field,
+    SummaryStructure,
+    ExtendedSummaryStructure,
+    ValidationResultStructure,
+    AgentBlueprint,
+)
 from .prompt import PromptRenderer
 from .config import OpenAISettings
-from .vector_storage import
+from .vector_storage import VectorStorage, VectorStorageFileInfo, VectorStorageFileStats
 from .agent import (
     AgentBase,
     AgentConfig,
@@ -18,10 +31,11 @@ from .agent import (
     WebAgentSearch,
 )
 from .response import (
-
+    BaseResponse,
     ResponseMessage,
     ResponseMessages,
     ResponseToolCall,
+    attach_vector_store,
 )
 
 __all__ = [
@@ -33,10 +47,6 @@ __all__ = [
     "VectorStorage",
     "VectorStorageFileInfo",
     "VectorStorageFileStats",
-    "assistant_tool_definition",
-    "assistant_format",
-    "response_tool_definition",
-    "response_format",
     "SummaryStructure",
     "PromptStructure",
     "AgentBlueprint",
@@ -55,8 +65,9 @@ __all__ = [
     "WebSearchStructure",
     "VectorSearchStructure",
     "ValidationResultStructure",
-    "
+    "BaseResponse",
     "ResponseMessage",
     "ResponseMessages",
     "ResponseToolCall",
+    "attach_vector_store",
 ]
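
Usage sketch (illustrative, not part of the diff): the net effect on the package root. ``BaseResponse`` replaces the old response export and ``attach_vector_store`` is now re-exported at the top level; every name below appears in the updated ``__all__``.

from openai_sdk_helpers import (
    BaseResponse,
    ResponseMessages,
    WebAgentSearch,
    attach_vector_store,
)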
openai_sdk_helpers/agent/base.py
CHANGED
@@ -207,7 +207,7 @@ class AgentBase:
         """
         agent_config: Dict[str, Any] = {
             "name": self.agent_name,
-            "instructions": self._build_prompt_from_jinja(),
+            "instructions": self._build_prompt_from_jinja() or ".",
             "model": self.model,
         }
         if self._output_type:

openai_sdk_helpers/agent/web_search.py
CHANGED

@@ -311,7 +311,7 @@ class WebAgentSearch(AgentBase):
         )
         self._prompt_dir = prompt_dir
 
-    async def
+    async def run_agent_async(self, search_query: str) -> WebSearchStructure:
         """Execute the entire research workflow for ``search_query``.
 
         Parameters
@@ -358,10 +358,9 @@ class WebAgentSearch(AgentBase):
         WebSearchStructure
             Completed research output.
         """
-        return run_coroutine_agent_sync(self.
+        return run_coroutine_agent_sync(self.run_agent_async(search_query))
 
-
-    async def run_web_agent(search_query: str) -> WebSearchStructure:
+    async def run_web_agent_async(self, search_query: str) -> WebSearchStructure:
         """Return a research report for the given query using ``WebAgentSearch``.
 
         Parameters
@@ -374,7 +373,7 @@ class WebAgentSearch(AgentBase):
         WebSearchStructure
             Completed research output.
         """
-        return await
+        return await self.run_agent_async(search_query=search_query)
 
     @staticmethod
     def run_web_agent_sync(search_query: str) -> WebSearchStructure:
@@ -391,7 +390,7 @@ class WebAgentSearch(AgentBase):
             Completed research output.
         """
         return run_coroutine_agent_sync(
-            WebAgentSearch.
+            WebAgentSearch().run_web_agent_async(search_query=search_query)
         )
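
Usage sketch (illustrative, not part of the diff): the renamed WebAgentSearch entry points. The static sync wrapper visibly constructs WebAgentSearch() with no arguments; any further constructor configuration is not shown in this diff, so treat the calls below as assumptions.

from openai_sdk_helpers import WebAgentSearch

# Blocking helper; wraps run_web_agent_async via run_coroutine_agent_sync.
report = WebAgentSearch.run_web_agent_sync("postmortem template best practices")
print(type(report).__name__)  # WebSearchStructure

# Inside an existing event loop, the renamed async variants apply instead:
# report = await WebAgentSearch().run_agent_async("...")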
openai_sdk_helpers/response/__init__.py
CHANGED

@@ -2,14 +2,14 @@
 
 from __future__ import annotations
 
-from .base import
+from .base import BaseResponse
 from .messages import ResponseMessage, ResponseMessages
 from .runner import run_sync, run_async, run_streamed
 from .vector_store import attach_vector_store
 from .tool_call import ResponseToolCall
 
 __all__ = [
-    "
+    "BaseResponse",
     "ResponseMessage",
     "ResponseMessages",
     "run_sync",
openai_sdk_helpers/response/base.py
CHANGED

@@ -10,11 +10,13 @@ import threading
 import uuid
 from pathlib import Path
 from typing import (
+    TYPE_CHECKING,
     Any,
     Callable,
     Generic,
     List,
     Optional,
+    Sequence,
     Tuple,
     Type,
     TypeVar,
@@ -32,16 +34,22 @@ from openai.types.responses.response_input_param import ResponseInputItemParam
 from openai.types.responses.response_input_text_param import ResponseInputTextParam
 from openai.types.responses.response_output_message import ResponseOutputMessage
 
-from .messages import ResponseMessages
+from .messages import ResponseMessage, ResponseMessages
 from ..structure import BaseStructure
 from ..utils import ensure_list, log
 
+if TYPE_CHECKING:
+    from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
+
 T = TypeVar("T", bound=BaseStructure)
 ToolHandler = Callable[[ResponseFunctionToolCall], Union[str, Any]]
 ProcessContent = Callable[[str], Tuple[str, List[str]]]
 
 
-
+RB = TypeVar("RB", bound="BaseResponse[BaseStructure]")
+
+
+class BaseResponse(Generic[T]):
     """Manage OpenAI interactions for structured responses.
 
     This base class handles input construction, OpenAI requests, tool calls,
@@ -55,6 +63,8 @@ class ResponseBase(Generic[T]):
         Synchronous wrapper around ``run_async``.
     run_streamed(content, attachments)
         Await ``run_async`` to mirror the agent API.
+    build_streamlit_config(...)
+        Construct a :class:`StreamlitAppConfig` using this class as the builder.
     save(filepath)
         Serialize the message history to disk.
     close()
@@ -445,6 +455,81 @@ class ResponseBase(Generic[T]):
         """
         return asyncio.run(self.run_async(content=content, attachments=attachments))
 
+    def get_last_tool_message(self) -> ResponseMessage | None:
+        """Return the most recent tool message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest tool message or ``None`` when absent.
+        """
+        return self.messages.get_last_tool_message()
+
+    def get_last_user_message(self) -> ResponseMessage | None:
+        """Return the most recent user message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest user message or ``None`` when absent.
+        """
+        return self.messages.get_last_user_message()
+
+    def get_last_assistant_message(self) -> ResponseMessage | None:
+        """Return the most recent assistant message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest assistant message or ``None`` when absent.
+        """
+        return self.messages.get_last_assistant_message()
+
+    @classmethod
+    def build_streamlit_config(
+        cls: type[RB],
+        *,
+        display_title: str = "Example copilot",
+        description: str | None = None,
+        system_vector_store: Sequence[str] | str | None = None,
+        preserve_vector_stores: bool = False,
+        model: str | None = None,
+    ) -> "StreamlitAppConfig":
+        """Construct a :class:`StreamlitAppConfig` using ``cls`` as the builder.
+
+        Parameters
+        ----------
+        display_title : str, default="Example copilot"
+            Title displayed at the top of the Streamlit page.
+        description : str or None, default=None
+            Optional short description shown beneath the title.
+        system_vector_store : Sequence[str] | str | None, default=None
+            Optional vector store names to attach as system context.
+        preserve_vector_stores : bool, default=False
+            When ``True``, skip automatic vector store cleanup on close.
+        model : str or None, default=None
+            Optional model hint for display alongside the chat interface.
+
+        Returns
+        -------
+        StreamlitAppConfig
+            Validated configuration bound to ``cls`` as the response builder.
+        """
+        from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
+
+        normalized_stores = None
+        if system_vector_store is not None:
+            normalized_stores = ensure_list(system_vector_store)
+
+        return StreamlitAppConfig(
+            response=cls,
+            display_title=display_title,
+            description=description,
+            system_vector_store=normalized_stores,
+            preserve_vector_stores=preserve_vector_stores,
+            model=model,
+        )
+
     def save(self, filepath: Optional[str | Path] = None) -> None:
         """Serialize the message history to a JSON file."""
         if filepath is not None:
@@ -478,7 +563,7 @@ class ResponseBase(Generic[T]):
             f"messages={len(self.messages.messages)}, data_path={data_path}>"
         )
 
-    def __enter__(self) -> "
+    def __enter__(self) -> "BaseResponse[T]":
        """Enter the context manager for this response session."""
        return self
 
@@ -489,7 +574,7 @@ class ResponseBase(Generic[T]):
     def close(self) -> None:
         """Delete managed vector stores and clean up the session."""
         log(f"Closing session {self.uuid} for {self.__class__.__name__}")
-
+        self.save()
         try:
             if self._user_vector_storage and self._cleanup_user_vector_storage:
                 self._user_vector_storage.delete()
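
Usage sketch (illustrative, not part of the diff): the new BaseResponse helpers in context. SummaryResponse is a hypothetical subclass parameterized with the package's exported SummaryStructure; the no-argument constructor and the run_sync keyword are assumptions, since only the "Synchronous wrapper around run_async" docstring entry is visible above.

from openai_sdk_helpers import BaseResponse, SummaryStructure


class SummaryResponse(BaseResponse[SummaryStructure]):
    """Hypothetical subclass, used only for illustration."""


with SummaryResponse() as session:  # __enter__ now returns "BaseResponse[T]"
    session.run_sync(content="Summarize the Q3 report.")  # assumed signature
    answer = session.get_last_assistant_message()
    if answer is not None:
        print(answer)
# On exit, close() now calls save() before deleting managed vector stores.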
openai_sdk_helpers/response/messages.py
CHANGED

@@ -209,3 +209,51 @@ class ResponseMessages(JSONSerializable):
         return [
             msg.to_openai_format() for msg in self.messages if msg.role != "assistant"
         ]
+
+    def _get_last_message(self, role: str) -> ResponseMessage | None:
+        """Return the most recent message for the given role.
+
+        Parameters
+        ----------
+        role : str
+            Role name to filter messages by.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest message matching ``role`` or ``None`` when absent.
+        """
+        for message in reversed(self.messages):
+            if message.role == role:
+                return message
+        return None
+
+    def get_last_assistant_message(self) -> ResponseMessage | None:
+        """Return the most recent assistant message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest assistant message or ``None`` when absent.
+        """
+        return self._get_last_message(role="assistant")
+
+    def get_last_tool_message(self) -> ResponseMessage | None:
+        """Return the most recent tool message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest tool message or ``None`` when absent.
+        """
+        return self._get_last_message(role="tool")
+
+    def get_last_user_message(self) -> ResponseMessage | None:
+        """Return the most recent user message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest user message or ``None`` when absent.
+        """
+        return self._get_last_message(role="user")
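
The three public getters share a single reverse scan. A standalone sketch of that pattern (Msg is a stand-in for ResponseMessage, whose full field set is not shown in this diff):

from dataclasses import dataclass


@dataclass
class Msg:  # stand-in for ResponseMessage
    role: str
    content: str


def last_by_role(history: list[Msg], role: str) -> Msg | None:
    # Walk newest-to-oldest and stop at the first role match,
    # mirroring ResponseMessages._get_last_message above.
    for message in reversed(history):
        if message.role == role:
            return message
    return None


history = [Msg("user", "hi"), Msg("assistant", "hello"), Msg("user", "bye")]
assert last_by_role(history, "assistant").content == "hello"
assert last_by_role(history, "system") is None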
openai_sdk_helpers/response/runner.py
CHANGED

@@ -6,10 +6,10 @@ import asyncio
 
 from typing import Any, Optional, Type, TypeVar
 
-from .base import
+from .base import BaseResponse
 
 
-R = TypeVar("R", bound=
+R = TypeVar("R", bound=BaseResponse[Any])
 
 
 def run_sync(
@@ -32,7 +32,7 @@ def run_sync(
     Returns
     -------
     Any
-        Parsed response from :meth:`
+        Parsed response from :meth:`BaseResponse.run_response`.
     """
     response = response_cls(**(response_kwargs or {}))
     try:
@@ -61,7 +61,7 @@ async def run_async(
     Returns
     -------
     Any
-        Parsed response from :meth:`
+        Parsed response from :meth:`BaseResponse.run_response_async`.
     """
     response = response_cls(**(response_kwargs or {}))
     try:
@@ -79,7 +79,7 @@ def run_streamed(
     """Run a response workflow and return the asynchronous result.
 
     This mirrors the agent API for discoverability. Streaming responses are not
-    currently supported by :class:`
+    currently supported by :class:`BaseResponse`, so this returns the same value
     as :func:`run_async`.
 
     Parameters
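
Usage sketch (illustrative, not part of the diff): the module-level runner after the rename. Only response_cls and the response_kwargs dict forwarded to the constructor are visible above; the content keyword and the SummaryResponse subclass (from the earlier sketch) are assumptions.

from openai_sdk_helpers.response import run_sync

# Instantiates SummaryResponse(**(response_kwargs or {})), runs the workflow,
# and returns the parsed result from BaseResponse.run_response.
result = run_sync(
    SummaryResponse,
    content="Summarize the Q3 report.",  # assumed keyword
)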
openai_sdk_helpers/response/tool_call.py
CHANGED

@@ -4,6 +4,8 @@ from __future__ import annotations
 
 from dataclasses import dataclass
 from typing import Tuple
+import json
+import ast
 
 from openai.types.responses.response_function_tool_call_param import (
     ResponseFunctionToolCallParam,
@@ -68,3 +70,40 @@ class ResponseToolCall:
             },
         )
         return function_call, function_call_output
+
+
+def parse_tool_arguments(arguments: str) -> dict:
+    """Parse tool call arguments which may not be valid JSON.
+
+    The OpenAI API is expected to return well-formed JSON for tool arguments,
+    but minor formatting issues (such as the use of single quotes) can occur.
+    This helper first tries ``json.loads`` and falls back to
+    ``ast.literal_eval`` for simple cases.
+
+    Parameters
+    ----------
+    arguments
+        Raw argument string from the tool call.
+
+    Returns
+    -------
+    dict
+        Parsed dictionary of arguments.
+
+    Raises
+    ------
+    ValueError
+        If the arguments cannot be parsed as JSON.
+
+    Examples
+    --------
+    >>> parse_tool_arguments('{"key": "value"}')["key"]
+    'value'
+    """
+    try:
+        return json.loads(arguments)
+    except json.JSONDecodeError:
+        try:
+            return ast.literal_eval(arguments)
+        except Exception as exc:  # noqa: BLE001
+            raise ValueError(f"Invalid JSON arguments: {arguments}") from exc
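
Since parse_tool_arguments appears in full above, its behavior can be shown directly: strict JSON first, an ast.literal_eval fallback for near-JSON such as single-quoted dicts, ValueError otherwise.

from openai_sdk_helpers.response.tool_call import parse_tool_arguments

assert parse_tool_arguments('{"city": "Oslo", "days": 3}') == {"city": "Oslo", "days": 3}
# Single quotes are invalid JSON; the ast.literal_eval fallback handles them.
assert parse_tool_arguments("{'city': 'Oslo', 'days': 3}") == {"city": "Oslo", "days": 3}

try:
    parse_tool_arguments("not a dict")
except ValueError as exc:
    print(exc)  # Invalid JSON arguments: not a dict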
openai_sdk_helpers/response/vector_store.py
CHANGED

@@ -7,11 +7,11 @@ from typing import Any, Optional, Sequence
 from openai import OpenAI
 
 from ..utils import ensure_list
-from .base import
+from .base import BaseResponse
 
 
 def attach_vector_store(
-    response:
+    response: BaseResponse[Any],
     vector_stores: str | Sequence[str],
     api_key: Optional[str] = None,
 ) -> list[str]:
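
Usage sketch (illustrative, not part of the diff): attach_vector_store with the new annotation. The signature above shows any BaseResponse plus one or more store names, returning a list of IDs; the store names are hypothetical and SummaryResponse comes from the earlier sketch.

from openai_sdk_helpers import attach_vector_store

with SummaryResponse() as session:
    store_ids = attach_vector_store(
        session,
        vector_stores=["product-docs", "release-notes"],  # hypothetical names
    )
    print(store_ids)  # list[str] of attached vector store identifiers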
openai_sdk_helpers/streamlit_app/__init__.py
ADDED

@@ -0,0 +1,13 @@
+"""Streamlit app utilities for the config-driven chat interface."""
+
+from .configuration import (
+    StreamlitAppConfig,
+    _load_configuration,
+    load_app_config,
+)
+
+__all__ = [
+    "StreamlitAppConfig",
+    "_load_configuration",
+    "load_app_config",
+]
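
This new package pairs with BaseResponse.build_streamlit_config from response/base.py above. A hedged sketch: the keyword names come from that diff, while the title, description, and store name are hypothetical, as is how load_app_config consumes the result.

config = SummaryResponse.build_streamlit_config(
    display_title="Report copilot",
    description="Ask questions about the latest filings.",
    system_vector_store="filings-2024",  # normalized to a list via ensure_list
    preserve_vector_stores=True,         # skip vector store cleanup on close
)
# config is a StreamlitAppConfig bound to SummaryResponse as the builder,
# presumably consumable by the streamlit_app entry points exported above.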