openai-sdk-helpers: openai_sdk_helpers-0.0.7-py3-none-any.whl → openai_sdk_helpers-0.0.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +85 -10
- openai_sdk_helpers/agent/__init__.py +8 -4
- openai_sdk_helpers/agent/base.py +81 -46
- openai_sdk_helpers/agent/config.py +6 -4
- openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
- openai_sdk_helpers/agent/prompt_utils.py +7 -1
- openai_sdk_helpers/agent/runner.py +67 -141
- openai_sdk_helpers/agent/search/__init__.py +33 -0
- openai_sdk_helpers/agent/search/base.py +297 -0
- openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
- openai_sdk_helpers/agent/{web_search.py → search/web.py} +82 -162
- openai_sdk_helpers/agent/summarizer.py +29 -8
- openai_sdk_helpers/agent/translator.py +40 -13
- openai_sdk_helpers/agent/validation.py +32 -8
- openai_sdk_helpers/async_utils.py +132 -0
- openai_sdk_helpers/config.py +74 -36
- openai_sdk_helpers/context_manager.py +241 -0
- openai_sdk_helpers/enums/__init__.py +9 -1
- openai_sdk_helpers/enums/base.py +67 -8
- openai_sdk_helpers/environment.py +33 -6
- openai_sdk_helpers/errors.py +133 -0
- openai_sdk_helpers/logging_config.py +105 -0
- openai_sdk_helpers/prompt/__init__.py +10 -71
- openai_sdk_helpers/prompt/base.py +172 -0
- openai_sdk_helpers/response/__init__.py +37 -5
- openai_sdk_helpers/response/base.py +427 -189
- openai_sdk_helpers/response/config.py +176 -0
- openai_sdk_helpers/response/messages.py +104 -40
- openai_sdk_helpers/response/runner.py +79 -35
- openai_sdk_helpers/response/tool_call.py +75 -12
- openai_sdk_helpers/response/vector_store.py +29 -16
- openai_sdk_helpers/retry.py +175 -0
- openai_sdk_helpers/streamlit_app/__init__.py +30 -0
- openai_sdk_helpers/streamlit_app/app.py +345 -0
- openai_sdk_helpers/streamlit_app/config.py +502 -0
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +68 -0
- openai_sdk_helpers/structure/__init__.py +69 -3
- openai_sdk_helpers/structure/agent_blueprint.py +82 -19
- openai_sdk_helpers/structure/base.py +245 -91
- openai_sdk_helpers/structure/plan/__init__.py +15 -1
- openai_sdk_helpers/structure/plan/enum.py +41 -5
- openai_sdk_helpers/structure/plan/plan.py +101 -45
- openai_sdk_helpers/structure/plan/task.py +38 -6
- openai_sdk_helpers/structure/prompt.py +21 -2
- openai_sdk_helpers/structure/responses.py +52 -11
- openai_sdk_helpers/structure/summary.py +55 -7
- openai_sdk_helpers/structure/validation.py +34 -6
- openai_sdk_helpers/structure/vector_search.py +132 -18
- openai_sdk_helpers/structure/web_search.py +128 -12
- openai_sdk_helpers/types.py +57 -0
- openai_sdk_helpers/utils/__init__.py +32 -1
- openai_sdk_helpers/utils/core.py +200 -32
- openai_sdk_helpers/validation.py +302 -0
- openai_sdk_helpers/vector_storage/__init__.py +21 -1
- openai_sdk_helpers/vector_storage/cleanup.py +25 -13
- openai_sdk_helpers/vector_storage/storage.py +124 -66
- openai_sdk_helpers/vector_storage/types.py +20 -19
- openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
- openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
- openai_sdk_helpers-0.0.7.dist-info/METADATA +0 -193
- openai_sdk_helpers-0.0.7.dist-info/RECORD +0 -51
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/response/tool_call.py

```diff
@@ -1,9 +1,15 @@
-"""Tool call representation
+"""Tool call representation and argument parsing.
+
+This module provides data structures and utilities for managing tool calls
+in OpenAI response conversations, including conversion to OpenAI API formats
+and robust argument parsing.
+"""
 
 from __future__ import annotations
 
+import ast
+import json
 from dataclasses import dataclass
-from typing import Tuple
 
 from openai.types.responses.response_function_tool_call_param import (
     ResponseFunctionToolCallParam,
@@ -13,23 +19,27 @@ from openai.types.responses.response_input_param import FunctionCallOutput
 
 @dataclass
 class ResponseToolCall:
-    """Container for tool call data
+    """Container for tool call data in a conversation.
+
+    Stores the complete information about a tool invocation including
+    the call identifier, tool name, input arguments, and execution output.
+    Can convert to OpenAI API format for use in subsequent requests.
 
     Attributes
     ----------
     call_id : str
-
+        Unique identifier for this tool call.
     name : str
-        Name of the tool invoked.
+        Name of the tool that was invoked.
     arguments : str
-        JSON string
+        JSON string containing the arguments passed to the tool.
     output : str
-        JSON string representing the result produced by the tool.
+        JSON string representing the result produced by the tool handler.
 
     Methods
     -------
     to_response_input_item_param()
-        Convert
+        Convert to OpenAI API tool call format.
     """
 
     call_id: str
@@ -39,14 +49,28 @@ class ResponseToolCall:
 
     def to_response_input_item_param(
         self,
-    ) ->
-        """Convert stored data into OpenAI tool call objects.
+    ) -> tuple[ResponseFunctionToolCallParam, FunctionCallOutput]:
+        """Convert stored data into OpenAI API tool call objects.
+
+        Creates the function call parameter and corresponding output object
+        required by the OpenAI API for tool interaction.
 
         Returns
         -------
         tuple[ResponseFunctionToolCallParam, FunctionCallOutput]
-
-
+            A two-element tuple containing:
+            - ResponseFunctionToolCallParam: The function call representation
+            - FunctionCallOutput: The function output representation
+
+        Examples
+        --------
+        >>> tool_call = ResponseToolCall(
+        ...     call_id="call_123",
+        ...     name="search",
+        ...     arguments='{"query": "test"}',
+        ...     output='{"results": []}'
+        ... )
+        >>> func_call, func_output = tool_call.to_response_input_item_param()
         """
         from typing import cast
 
@@ -68,3 +92,42 @@ class ResponseToolCall:
             },
         )
         return function_call, function_call_output
+
+
+def parse_tool_arguments(arguments: str) -> dict:
+    """Parse tool call arguments with fallback for malformed JSON.
+
+    Attempts to parse arguments as JSON first, then falls back to
+    ast.literal_eval for cases where the OpenAI API returns minor
+    formatting issues like single quotes instead of double quotes.
+
+    Parameters
+    ----------
+    arguments : str
+        Raw argument string from a tool call, expected to be JSON.
+
+    Returns
+    -------
+    dict
+        Parsed dictionary of tool arguments.
+
+    Raises
+    ------
+    ValueError
+        If the arguments cannot be parsed as valid JSON or Python literal.
+
+    Examples
+    --------
+    >>> parse_tool_arguments('{"key": "value"}')
+    {'key': 'value'}
+
+    >>> parse_tool_arguments("{'key': 'value'}")
+    {'key': 'value'}
+    """
+    try:
+        return json.loads(arguments)
+    except json.JSONDecodeError:
+        try:
+            return ast.literal_eval(arguments)
+        except Exception as exc:  # noqa: BLE001
+            raise ValueError(f"Invalid JSON arguments: {arguments}") from exc
```
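With these changes, a tool call whose arguments come back with Python-style quoting no longer derails argument handling: parse_tool_arguments tries strict JSON first and only then falls back to ast.literal_eval. A minimal usage sketch, assuming the 0.0.9 wheel is installed; the import path follows the file layout in this diff and is not verified against the wheel's public re-exports.

```python
# Hedged sketch of the new tool_call.py helpers in use.
from openai_sdk_helpers.response.tool_call import ResponseToolCall, parse_tool_arguments

tool_call = ResponseToolCall(
    call_id="call_123",
    name="search",
    arguments="{'query': 'quarterly report'}",  # single quotes: not strict JSON
    output='{"results": []}',
)

# json.loads would reject the single-quoted payload, so the helper falls back
# to ast.literal_eval and still returns a plain dict.
args = parse_tool_arguments(tool_call.arguments)
assert args == {"query": "quarterly report"}

# Convert back into the pair of objects the OpenAI Responses API expects when
# the tool interaction is replayed in a follow-up request.
func_call, func_output = tool_call.to_response_input_item_param()
```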
openai_sdk_helpers/response/vector_store.py

```diff
@@ -1,42 +1,55 @@
-"""
+"""Vector store attachment utilities for responses.
+
+This module provides functions for attaching named vector stores to response
+instances, enabling file search capabilities through the OpenAI API.
+"""
 
 from __future__ import annotations
 
-from typing import Any,
+from typing import Any, Sequence
 
 from openai import OpenAI
 
 from ..utils import ensure_list
-from .base import
+from .base import BaseResponse
 
 
 def attach_vector_store(
-    response:
+    response: BaseResponse[Any],
     vector_stores: str | Sequence[str],
-    api_key:
+    api_key: str | None = None,
 ) -> list[str]:
-    """Attach vector stores to a response
+    """Attach named vector stores to a response's file_search tool.
+
+    Resolves vector store names to IDs via the OpenAI API and configures
+    the response's file_search tool to use them. Creates the file_search
+    tool if it doesn't exist, or updates it to include additional stores.
 
     Parameters
     ----------
-    response
-        Response instance whose tool configuration
-    vector_stores
-        Single vector store name or
-    api_key : str,
-        API key
-        ``None``.
+    response : BaseResponse[Any]
+        Response instance whose tool configuration will be updated.
+    vector_stores : str or Sequence[str]
+        Single vector store name or sequence of names to attach.
+    api_key : str or None, default None
+        API key for OpenAI client. If None, uses the response's client.
 
     Returns
     -------
     list[str]
-        Ordered list of vector store IDs
+        Ordered list of vector store IDs attached to the file_search tool.
 
     Raises
     ------
     ValueError
-        If a vector store cannot be resolved
-
+        If a vector store name cannot be resolved to an ID.
+        If no API key is available and the response has no client.
+
+    Examples
+    --------
+    >>> from openai_sdk_helpers.response import attach_vector_store
+    >>> ids = attach_vector_store(response, "knowledge_base")
+    >>> ids = attach_vector_store(response, ["docs", "kb"], api_key="sk-...")
     """
     requested_stores = ensure_list(vector_stores)
 
```
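The new docstring states that attach_vector_store resolves store names to IDs through the OpenAI API before wiring them into the response's file_search tool. The snippet below is only a rough illustration of that name-to-ID lookup, not the package's actual code; it assumes a recent openai-python release where vector stores are exposed at client.vector_stores (older releases used client.beta.vector_stores).

```python
# Illustrative sketch only -- not openai_sdk_helpers' implementation.
from openai import OpenAI


def resolve_vector_store_ids(names: list[str], api_key: str | None = None) -> list[str]:
    """Map vector store names to IDs, raising ValueError for unknown names."""
    client = OpenAI(api_key=api_key)  # falls back to OPENAI_API_KEY if api_key is None
    by_name = {store.name: store.id for store in client.vector_stores.list()}
    missing = [name for name in names if name not in by_name]
    if missing:
        raise ValueError(f"Could not resolve vector stores: {missing}")
    return [by_name[name] for name in names]
```

The resolved IDs are presumably what end up in the file_search tool's vector_store_ids list, which is why the helper returns them in order.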
openai_sdk_helpers/retry.py (new file)

```diff
@@ -0,0 +1,175 @@
+"""Retry decorators with exponential backoff for API operations.
+
+Provides decorators for retrying async and sync functions with
+exponential backoff and jitter when rate limiting or transient
+errors occur.
+"""
+
+import asyncio
+import logging
+import random
+import time
+from functools import wraps
+from typing import Any, Callable, ParamSpec, TypeVar
+
+from openai import APIError, RateLimitError
+
+from openai_sdk_helpers.errors import AsyncExecutionError
+from openai_sdk_helpers.utils.core import log
+
+P = ParamSpec("P")
+T = TypeVar("T")
+
+# Default retry configuration constants
+DEFAULT_MAX_RETRIES = 3
+DEFAULT_BASE_DELAY = 1.0
+DEFAULT_MAX_DELAY = 60.0
+
+# HTTP status codes for transient errors
+TRANSIENT_HTTP_STATUS_CODES = frozenset({408, 429, 500, 502, 503})
+
+
+def with_exponential_backoff(
+    max_retries: int = DEFAULT_MAX_RETRIES,
+    base_delay: float = DEFAULT_BASE_DELAY,
+    max_delay: float = DEFAULT_MAX_DELAY,
+) -> Callable[[Callable[P, T]], Callable[P, T]]:
+    """Decorate functions with exponential backoff on transient errors.
+
+    Retries on RateLimitError or transient API errors (5xx, 408, 429).
+    Uses exponential backoff with jitter to avoid thundering herd.
+
+    Parameters
+    ----------
+    max_retries : int
+        Maximum number of retry attempts (total attempts = max_retries + 1).
+        Default is 3.
+    base_delay : float
+        Initial delay in seconds before first retry. Default is 1.0.
+    max_delay : float
+        Maximum delay in seconds between retries. Default is 60.0.
+
+    Returns
+    -------
+    Callable
+        Decorator function.
+
+    Examples
+    --------
+    >>> @with_exponential_backoff(max_retries=3, base_delay=1.0)
+    ... def call_api(query: str) -> str:
+    ...     # API call that may fail with rate limiting
+    ...     return client.call(query)
+    """
+
+    def decorator(func: Callable[P, T]) -> Callable[P, T]:
+        """Apply retry logic to function."""
+        if asyncio.iscoroutinefunction(func):
+
+            @wraps(func)
+            async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
+                """Async wrapper with retry logic."""
+                last_exc: Exception | None = None
+                for attempt in range(max_retries + 1):
+                    try:
+                        return await func(*args, **kwargs)
+                    except RateLimitError as exc:
+                        last_exc = exc
+                        if attempt >= max_retries:
+                            raise
+                        delay = min(
+                            base_delay * (2**attempt) + random.uniform(0, 1),
+                            max_delay,
+                        )
+                        log(
+                            f"Rate limited on {func.__name__}, retrying in "
+                            f"{delay:.2f}s (attempt {attempt + 1}/{max_retries + 1})",
+                            level=logging.WARNING,
+                        )
+                        await asyncio.sleep(delay)
+                    except APIError as exc:
+                        last_exc = exc
+                        status_code: int | None = getattr(exc, "status_code", None)
+                        # Only retry on transient errors
+                        if (
+                            not status_code
+                            or status_code not in TRANSIENT_HTTP_STATUS_CODES
+                        ):
+                            raise
+                        if attempt >= max_retries:
+                            raise
+                        delay = min(
+                            base_delay * (2**attempt),
+                            max_delay,
+                        )
+                        log(
+                            f"Transient API error on {func.__name__}: "
+                            f"{status_code}, retrying in {delay:.2f}s "
+                            f"(attempt {attempt + 1}/{max_retries + 1})",
+                            level=logging.WARNING,
+                        )
+                        await asyncio.sleep(delay)
+
+                # Should never reach here, but handle edge case
+                if last_exc:
+                    raise last_exc
+                raise AsyncExecutionError(
+                    f"Unexpected state in {func.__name__} after retries"
+                )
+
+            return async_wrapper  # type: ignore
+
+        @wraps(func)
+        def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
+            """Sync wrapper with retry logic."""
+            last_exc: Exception | None = None
+            for attempt in range(max_retries + 1):
+                try:
+                    return func(*args, **kwargs)
+                except RateLimitError as exc:
+                    last_exc = exc
+                    if attempt >= max_retries:
+                        raise
+                    delay = min(
+                        base_delay * (2**attempt) + random.uniform(0, 1),
+                        max_delay,
+                    )
+                    log(
+                        f"Rate limited on {func.__name__}, retrying in "
+                        f"{delay:.2f}s (attempt {attempt + 1}/{max_retries + 1})",
+                        level=logging.WARNING,
+                    )
+                    time.sleep(delay)
+                except APIError as exc:
+                    last_exc = exc
+                    status_code: int | None = getattr(exc, "status_code", None)
+                    # Only retry on transient errors
+                    if (
+                        not status_code
+                        or status_code not in TRANSIENT_HTTP_STATUS_CODES
+                    ):
+                        raise
+                    if attempt >= max_retries:
+                        raise
+                    delay = min(
+                        base_delay * (2**attempt),
+                        max_delay,
+                    )
+                    log(
+                        f"Transient API error on {func.__name__}: "
+                        f"{status_code}, retrying in {delay:.2f}s "
+                        f"(attempt {attempt + 1}/{max_retries + 1})",
+                        level=logging.WARNING,
+                    )
+                    time.sleep(delay)
+
+            # Should never reach here, but handle edge case
+            if last_exc:
+                raise last_exc
+            raise AsyncExecutionError(
+                f"Unexpected state in {func.__name__} after retries"
+            )
+
+        return sync_wrapper  # type: ignore
+
+    return decorator
```
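To make the backoff arithmetic concrete: with the defaults above (base_delay=1.0, max_delay=60.0, max_retries=3), the rate-limit branch sleeps roughly 1 s, 2 s, and 4 s (each plus up to one second of jitter) before the fourth and final attempt is allowed to fail, while the transient-API-error branch uses the same doubling without jitter. A quick standalone check of the formula used in both wrappers:

```python
import random

# Mirror retry.py's defaults and its rate-limit delay formula.
DEFAULT_MAX_RETRIES = 3
DEFAULT_BASE_DELAY = 1.0
DEFAULT_MAX_DELAY = 60.0

for attempt in range(DEFAULT_MAX_RETRIES):
    delay = min(DEFAULT_BASE_DELAY * (2**attempt) + random.uniform(0, 1), DEFAULT_MAX_DELAY)
    print(f"sleep before retry {attempt + 1}: ~{delay:.2f}s")
# Typical output: ~1.x s, ~2.x s, ~4.x s -- the 60 s cap only kicks in for
# larger base delays or higher retry counts.
```

Because the decorator branches on asyncio.iscoroutinefunction, the same @with_exponential_backoff(...) line should work unchanged on async callables as well as the sync example shown in its docstring.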
openai_sdk_helpers/streamlit_app/__init__.py (new file)

```diff
@@ -0,0 +1,30 @@
+"""Streamlit application utilities for configuration-driven chat interfaces.
+
+This module provides configuration management and loading utilities for building
+Streamlit-based chat applications powered by OpenAI response handlers. It enables
+rapid deployment of conversational AI interfaces with minimal boilerplate.
+
+Classes
+-------
+StreamlitAppConfig
+    Validated configuration for Streamlit chat applications.
+
+Functions
+---------
+load_app_config
+    Load and validate configuration from a Python module.
+_load_configuration
+    Load configuration with user-friendly error handling for Streamlit UI.
+"""
+
+from .config import (
+    StreamlitAppConfig,
+    _load_configuration,
+    load_app_config,
+)
+
+__all__ = [
+    "StreamlitAppConfig",
+    "_load_configuration",
+    "load_app_config",
+]
```