openai-sdk-helpers 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.
@@ -2,10 +2,23 @@
 
 from __future__ import annotations
 
-from .structure import *
+from .structure import (
+    BaseStructure,
+    SchemaOptions,
+    PlanStructure,
+    TaskStructure,
+    WebSearchStructure,
+    VectorSearchStructure,
+    PromptStructure,
+    spec_field,
+    SummaryStructure,
+    ExtendedSummaryStructure,
+    ValidationResultStructure,
+    AgentBlueprint,
+)
 from .prompt import PromptRenderer
 from .config import OpenAISettings
-from .vector_storage import *
+from .vector_storage import VectorStorage, VectorStorageFileInfo, VectorStorageFileStats
 from .agent import (
     AgentBase,
     AgentConfig,
@@ -18,10 +31,11 @@ from .agent import (
     WebAgentSearch,
 )
 from .response import (
-    ResponseBase,
+    BaseResponse,
     ResponseMessage,
     ResponseMessages,
     ResponseToolCall,
+    attach_vector_store,
 )
 
 __all__ = [
@@ -33,10 +47,6 @@ __all__ = [
     "VectorStorage",
     "VectorStorageFileInfo",
     "VectorStorageFileStats",
-    "assistant_tool_definition",
-    "assistant_format",
-    "response_tool_definition",
-    "response_format",
     "SummaryStructure",
     "PromptStructure",
     "AgentBlueprint",
@@ -55,8 +65,9 @@ __all__ = [
     "WebSearchStructure",
     "VectorSearchStructure",
     "ValidationResultStructure",
-    "ResponseBase",
+    "BaseResponse",
     "ResponseMessage",
     "ResponseMessages",
     "ResponseToolCall",
+    "attach_vector_store",
 ]
@@ -207,7 +207,7 @@ class AgentBase:
         """
         agent_config: Dict[str, Any] = {
             "name": self.agent_name,
-            "instructions": self._build_prompt_from_jinja(),
+            "instructions": self._build_prompt_from_jinja() or ".",
             "model": self.model,
         }
         if self._output_type:
@@ -311,7 +311,7 @@ class WebAgentSearch(AgentBase):
         )
         self._prompt_dir = prompt_dir
 
-    async def run_agent(self, search_query: str) -> WebSearchStructure:
+    async def run_agent_async(self, search_query: str) -> WebSearchStructure:
         """Execute the entire research workflow for ``search_query``.
 
         Parameters
@@ -358,10 +358,9 @@
         WebSearchStructure
             Completed research output.
         """
-        return run_coroutine_agent_sync(self.run_agent(search_query))
+        return run_coroutine_agent_sync(self.run_agent_async(search_query))
 
-    @staticmethod
-    async def run_web_agent(search_query: str) -> WebSearchStructure:
+    async def run_web_agent_async(self, search_query: str) -> WebSearchStructure:
         """Return a research report for the given query using ``WebAgentSearch``.
 
         Parameters
@@ -374,7 +373,7 @@
         WebSearchStructure
             Completed research output.
         """
-        return await WebAgentSearch().run_agent(search_query=search_query)
+        return await self.run_agent_async(search_query=search_query)
 
     @staticmethod
     def run_web_agent_sync(search_query: str) -> WebSearchStructure:
@@ -391,7 +390,7 @@
             Completed research output.
         """
         return run_coroutine_agent_sync(
-            WebAgentSearch.run_web_agent(search_query=search_query)
+            WebAgentSearch().run_web_agent_async(search_query=search_query)
         )
 
 
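Note on migration: ``run_agent`` is renamed to ``run_agent_async``, and the former ``@staticmethod`` ``run_web_agent`` becomes the instance method ``run_web_agent_async``. A minimal caller-side sketch (the query string is illustrative), assuming the package exports shown in the ``__init__`` hunk above:

    from openai_sdk_helpers import WebAgentSearch

    # The synchronous entry point keeps its name and stays a @staticmethod.
    report = WebAgentSearch.run_web_agent_sync(search_query="site reliability patterns")

    # Async callers must switch to the renamed coroutine on an instance:
    #     report = await WebAgentSearch().run_web_agent_async(search_query=...)
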
@@ -10,6 +10,12 @@ from dotenv import dotenv_values
 from openai import OpenAI
 from pydantic import BaseModel, ConfigDict, Field
 
+from openai_sdk_helpers.utils import (
+    coerce_dict,
+    coerce_optional_float,
+    coerce_optional_int,
+)
+
 
 class OpenAISettings(BaseModel):
     """Configuration helpers for constructing OpenAI clients.
@@ -61,6 +67,28 @@
             " provided. Defaults to ``OPENAI_MODEL``."
         ),
     )
+    timeout: Optional[float] = Field(
+        default=None,
+        description=(
+            "Request timeout in seconds applied to all OpenAI client calls."
+            " Defaults to ``OPENAI_TIMEOUT``."
+        ),
+    )
+    max_retries: Optional[int] = Field(
+        default=None,
+        description=(
+            "Maximum number of automatic retries for transient failures."
+            " Defaults to ``OPENAI_MAX_RETRIES``."
+        ),
+    )
+    extra_client_kwargs: Dict[str, Any] = Field(
+        default_factory=dict,
+        description=(
+            "Additional keyword arguments forwarded to ``openai.OpenAI``. Use"
+            " this for less common options like ``default_headers`` or"
+            " ``http_client``."
+        ),
+    )
 
     @classmethod
     def from_env(
@@ -87,7 +115,18 @@
         else:
             env_file_values = dotenv_values()
 
-        values: Dict[str, Optional[str]] = {
+        timeout_raw = (
+            overrides.get("timeout")
+            or env_file_values.get("OPENAI_TIMEOUT")
+            or os.getenv("OPENAI_TIMEOUT")
+        )
+        max_retries_raw = (
+            overrides.get("max_retries")
+            or env_file_values.get("OPENAI_MAX_RETRIES")
+            or os.getenv("OPENAI_MAX_RETRIES")
+        )
+
+        values: Dict[str, Any] = {
             "api_key": overrides.get("api_key")
             or env_file_values.get("OPENAI_API_KEY")
             or os.getenv("OPENAI_API_KEY"),
@@ -103,6 +142,9 @@
             "default_model": overrides.get("default_model")
             or env_file_values.get("OPENAI_MODEL")
             or os.getenv("OPENAI_MODEL"),
+            "timeout": coerce_optional_float(timeout_raw),
+            "max_retries": coerce_optional_int(max_retries_raw),
+            "extra_client_kwargs": coerce_dict(overrides.get("extra_client_kwargs")),
         }
 
         settings = cls(**values)
@@ -128,7 +170,7 @@
            Keyword arguments populated with available authentication and routing
            values.
         """
-        kwargs: Dict[str, Any] = {}
+        kwargs: Dict[str, Any] = dict(self.extra_client_kwargs)
         if self.api_key:
             kwargs["api_key"] = self.api_key
         if self.org_id:
@@ -137,6 +179,10 @@
             kwargs["project"] = self.project_id
         if self.base_url:
             kwargs["base_url"] = self.base_url
+        if self.timeout is not None:
+            kwargs["timeout"] = self.timeout
+        if self.max_retries is not None:
+            kwargs["max_retries"] = self.max_retries
         return kwargs
 
     def create_client(self) -> OpenAI:
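Note: ``timeout`` and ``max_retries`` resolve with the same precedence as the other settings (explicit overrides, then the ``.env`` file, then the process environment), and the client kwargs are seeded from ``extra_client_kwargs`` before the named fields, so named fields win on conflict. A minimal sketch, assuming ``from_env`` accepts these keys as keyword overrides, as the ``overrides.get`` calls suggest; the header value is illustrative:

    from openai_sdk_helpers import OpenAISettings

    settings = OpenAISettings.from_env(
        timeout="30",      # coerce_optional_float -> 30.0
        max_retries="2",   # coerce_optional_int -> 2
        extra_client_kwargs={"default_headers": {"X-Demo": "1"}},
    )
    client = settings.create_client()  # OpenAI(..., timeout=30.0, max_retries=2)
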
@@ -2,17 +2,19 @@
 
 from __future__ import annotations
 
-from .base import ResponseBase
+from .base import BaseResponse
 from .messages import ResponseMessage, ResponseMessages
 from .runner import run_sync, run_async, run_streamed
+from .vector_store import attach_vector_store
 from .tool_call import ResponseToolCall
 
 __all__ = [
-    "ResponseBase",
+    "BaseResponse",
     "ResponseMessage",
     "ResponseMessages",
     "run_sync",
     "run_async",
     "run_streamed",
     "ResponseToolCall",
+    "attach_vector_store",
 ]
@@ -10,11 +10,13 @@ import threading
 import uuid
 from pathlib import Path
 from typing import (
+    TYPE_CHECKING,
     Any,
     Callable,
     Generic,
     List,
     Optional,
+    Sequence,
     Tuple,
     Type,
     TypeVar,
@@ -32,16 +34,22 @@ from openai.types.responses.response_input_param import ResponseInputItemParam
 from openai.types.responses.response_input_text_param import ResponseInputTextParam
 from openai.types.responses.response_output_message import ResponseOutputMessage
 
-from .messages import ResponseMessages
+from .messages import ResponseMessage, ResponseMessages
 from ..structure import BaseStructure
 from ..utils import ensure_list, log
 
+if TYPE_CHECKING:
+    from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
+
 T = TypeVar("T", bound=BaseStructure)
 ToolHandler = Callable[[ResponseFunctionToolCall], Union[str, Any]]
 ProcessContent = Callable[[str], Tuple[str, List[str]]]
 
 
-class ResponseBase(Generic[T]):
+RB = TypeVar("RB", bound="BaseResponse[BaseStructure]")
+
+
+class BaseResponse(Generic[T]):
     """Manage OpenAI interactions for structured responses.
 
     This base class handles input construction, OpenAI requests, tool calls,
@@ -55,6 +63,8 @@ class ResponseBase(Generic[T]):
         Synchronous wrapper around ``run_async``.
     run_streamed(content, attachments)
         Await ``run_async`` to mirror the agent API.
+    build_streamlit_config(...)
+        Construct a :class:`StreamlitAppConfig` using this class as the builder.
     save(filepath)
         Serialize the message history to disk.
     close()
@@ -129,6 +139,8 @@
         self._tools = tools if tools is not None else []
         self._schema = schema
         self._output_structure = output_structure
+        self._cleanup_user_vector_storage = False
+        self._cleanup_system_vector_storage = False
 
         if client is None:
             if api_key is None:
@@ -162,6 +174,7 @@
         self._system_vector_storage = self._vector_storage_cls(
             store_name=storage_name, client=self._client, model=self._model
         )
+        self._cleanup_system_vector_storage = True
         system_vector_storage = cast(Any, self._system_vector_storage)
         for file_path, tool_type in attachments:
             uploaded_file = system_vector_storage.upload_file(file_path=file_path)
@@ -248,6 +261,7 @@
             client=self._client,
             model=self._model,
         )
+        self._cleanup_user_vector_storage = True
         user_vector_storage = cast(Any, self._user_vector_storage)
         if not any(
             tool.get("type") == "file_search" for tool in self._tools
@@ -441,6 +455,81 @@
         """
         return asyncio.run(self.run_async(content=content, attachments=attachments))
 
+    def get_last_tool_message(self) -> ResponseMessage | None:
+        """Return the most recent tool message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest tool message or ``None`` when absent.
+        """
+        return self.messages.get_last_tool_message()
+
+    def get_last_user_message(self) -> ResponseMessage | None:
+        """Return the most recent user message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest user message or ``None`` when absent.
+        """
+        return self.messages.get_last_user_message()
+
+    def get_last_assistant_message(self) -> ResponseMessage | None:
+        """Return the most recent assistant message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest assistant message or ``None`` when absent.
+        """
+        return self.messages.get_last_assistant_message()
+
+    @classmethod
+    def build_streamlit_config(
+        cls: type[RB],
+        *,
+        display_title: str = "Example copilot",
+        description: str | None = None,
+        system_vector_store: Sequence[str] | str | None = None,
+        preserve_vector_stores: bool = False,
+        model: str | None = None,
+    ) -> "StreamlitAppConfig":
+        """Construct a :class:`StreamlitAppConfig` using ``cls`` as the builder.
+
+        Parameters
+        ----------
+        display_title : str, default="Example copilot"
+            Title displayed at the top of the Streamlit page.
+        description : str or None, default=None
+            Optional short description shown beneath the title.
+        system_vector_store : Sequence[str] | str | None, default=None
+            Optional vector store names to attach as system context.
+        preserve_vector_stores : bool, default=False
+            When ``True``, skip automatic vector store cleanup on close.
+        model : str or None, default=None
+            Optional model hint for display alongside the chat interface.
+
+        Returns
+        -------
+        StreamlitAppConfig
+            Validated configuration bound to ``cls`` as the response builder.
+        """
+        from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
+
+        normalized_stores = None
+        if system_vector_store is not None:
+            normalized_stores = ensure_list(system_vector_store)
+
+        return StreamlitAppConfig(
+            response=cls,
+            display_title=display_title,
+            description=description,
+            system_vector_store=normalized_stores,
+            preserve_vector_stores=preserve_vector_stores,
+            model=model,
+        )
+
     def save(self, filepath: Optional[str | Path] = None) -> None:
         """Serialize the message history to a JSON file."""
         if filepath is not None:
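Note: the local import inside ``build_streamlit_config`` keeps the Streamlit dependency optional at module import time. A minimal usage sketch, assuming a hypothetical ``BaseResponse`` subclass named ``CopilotResponse``; all strings are illustrative:

    config = CopilotResponse.build_streamlit_config(
        display_title="Docs copilot",
        description="Ask questions about the project docs.",
        system_vector_store="product-docs",  # normalized to ["product-docs"]
        preserve_vector_stores=True,
    )
    # config is a validated StreamlitAppConfig with response=CopilotResponse.
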
@@ -474,7 +563,7 @@
             f"messages={len(self.messages.messages)}, data_path={data_path}>"
         )
 
-    def __enter__(self) -> "ResponseBase[T]":
+    def __enter__(self) -> "BaseResponse[T]":
         """Enter the context manager for this response session."""
         return self
 
@@ -483,17 +572,17 @@
         self.close()
 
     def close(self) -> None:
-        """Delete remote vector stores and clean up the session."""
+        """Delete managed vector stores and clean up the session."""
         log(f"Closing session {self.uuid} for {self.__class__.__name__}")
-
+        self.save()
         try:
-            if self._user_vector_storage:
+            if self._user_vector_storage and self._cleanup_user_vector_storage:
                 self._user_vector_storage.delete()
                 log("User vector store deleted.")
         except Exception as exc:
             log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
         try:
-            if self._system_vector_storage:
+            if self._system_vector_storage and self._cleanup_system_vector_storage:
                 self._system_vector_storage.delete()
                 log("System vector store deleted.")
         except Exception as exc:
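Note: ``close()`` now persists the transcript via ``save()`` before cleanup, and the new ``_cleanup_*`` flags restrict deletion to vector stores the session created itself, so externally attached stores survive. A sketch of the context-manager flow, again assuming a hypothetical subclass ``CopilotResponse``:

    with CopilotResponse() as response:
        response.run_sync(content="Summarize the attached report.")
    # __exit__ -> close(): history is saved, then only the session-created
    # user/system vector stores are deleted.
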
@@ -209,3 +209,51 @@ class ResponseMessages(JSONSerializable):
         return [
             msg.to_openai_format() for msg in self.messages if msg.role != "assistant"
         ]
+
+    def _get_last_message(self, role: str) -> ResponseMessage | None:
+        """Return the most recent message for the given role.
+
+        Parameters
+        ----------
+        role : str
+            Role name to filter messages by.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest message matching ``role`` or ``None`` when absent.
+        """
+        for message in reversed(self.messages):
+            if message.role == role:
+                return message
+        return None
+
+    def get_last_assistant_message(self) -> ResponseMessage | None:
+        """Return the most recent assistant message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest assistant message or ``None`` when absent.
+        """
+        return self._get_last_message(role="assistant")
+
+    def get_last_tool_message(self) -> ResponseMessage | None:
+        """Return the most recent tool message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest tool message or ``None`` when absent.
+        """
+        return self._get_last_message(role="tool")
+
+    def get_last_user_message(self) -> ResponseMessage | None:
+        """Return the most recent user message.
+
+        Returns
+        -------
+        ResponseMessage or None
+            Latest user message or ``None`` when absent.
+        """
+        return self._get_last_message(role="user")
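Note: the ``BaseResponse.get_last_*`` helpers above delegate to these role-filtered reverse scans. A quick sketch against a populated ``ResponseMessages`` instance (``messages`` is hypothetical here):

    last_assistant = messages.get_last_assistant_message()
    if last_assistant is not None:
        print(last_assistant.to_openai_format())  # same accessor used elsewhere in this file
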
@@ -6,10 +6,10 @@ import asyncio
 
 from typing import Any, Optional, Type, TypeVar
 
-from .base import ResponseBase
+from .base import BaseResponse
 
 
-R = TypeVar("R", bound=ResponseBase[Any])
+R = TypeVar("R", bound=BaseResponse[Any])
 
 
 def run_sync(
@@ -32,7 +32,7 @@
     Returns
     -------
     Any
-        Parsed response from :meth:`ResponseBase.run_response`.
+        Parsed response from :meth:`BaseResponse.run_response`.
     """
     response = response_cls(**(response_kwargs or {}))
     try:
@@ -61,7 +61,7 @@
     Returns
     -------
     Any
-        Parsed response from :meth:`ResponseBase.run_response_async`.
+        Parsed response from :meth:`BaseResponse.run_response_async`.
     """
     response = response_cls(**(response_kwargs or {}))
     try:
@@ -79,7 +79,7 @@
     """Run a response workflow and return the asynchronous result.
 
     This mirrors the agent API for discoverability. Streaming responses are not
-    currently supported by :class:`ResponseBase`, so this returns the same value
+    currently supported by :class:`BaseResponse`, so this returns the same value
     as :func:`run_async`.
 
     Parameters
@@ -4,6 +4,8 @@ from __future__ import annotations
 
 from dataclasses import dataclass
 from typing import Tuple
+import json
+import ast
 
 from openai.types.responses.response_function_tool_call_param import (
     ResponseFunctionToolCallParam,
@@ -68,3 +70,40 @@
             },
         )
         return function_call, function_call_output
+
+
+def parse_tool_arguments(arguments: str) -> dict:
+    """Parse tool call arguments which may not be valid JSON.
+
+    The OpenAI API is expected to return well-formed JSON for tool arguments,
+    but minor formatting issues (such as the use of single quotes) can occur.
+    This helper first tries ``json.loads`` and falls back to
+    ``ast.literal_eval`` for simple cases.
+
+    Parameters
+    ----------
+    arguments
+        Raw argument string from the tool call.
+
+    Returns
+    -------
+    dict
+        Parsed dictionary of arguments.
+
+    Raises
+    ------
+    ValueError
+        If the arguments cannot be parsed as JSON.
+
+    Examples
+    --------
+    >>> parse_tool_arguments('{"key": "value"}')["key"]
+    'value'
+    """
+    try:
+        return json.loads(arguments)
+    except json.JSONDecodeError:
+        try:
+            return ast.literal_eval(arguments)
+        except Exception as exc:  # noqa: BLE001
+            raise ValueError(f"Invalid JSON arguments: {arguments}") from exc
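Note: the ``ast.literal_eval`` fallback accepts Python-literal payloads (for example, single-quoted keys) that strict JSON rejects. Both paths in one quick sketch:

    parse_tool_arguments('{"city": "Oslo"}')  # json.loads path -> {'city': 'Oslo'}
    parse_tool_arguments("{'city': 'Oslo'}")  # literal_eval fallback -> {'city': 'Oslo'}
    # parse_tool_arguments("not json")        # raises ValueError
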
@@ -0,0 +1,84 @@
+"""Helpers for attaching vector stores to responses."""
+
+from __future__ import annotations
+
+from typing import Any, Optional, Sequence
+
+from openai import OpenAI
+
+from ..utils import ensure_list
+from .base import BaseResponse
+
+
+def attach_vector_store(
+    response: BaseResponse[Any],
+    vector_stores: str | Sequence[str],
+    api_key: Optional[str] = None,
+) -> list[str]:
+    """Attach vector stores to a response ``file_search`` tool.
+
+    Parameters
+    ----------
+    response
+        Response instance whose tool configuration is updated.
+    vector_stores
+        Single vector store name or a sequence of names to attach.
+    api_key : str, optional
+        API key used when the response does not already have a client. Default
+        ``None``.
+
+    Returns
+    -------
+    list[str]
+        Ordered list of vector store IDs applied to the ``file_search`` tool.
+
+    Raises
+    ------
+    ValueError
+        If a vector store cannot be resolved or no API key is available when
+        required.
+    """
+    requested_stores = ensure_list(vector_stores)
+
+    client = getattr(response, "_client", None)
+    if client is None:
+        if api_key is None:
+            raise ValueError(
+                "OpenAI API key is required to resolve vector store names."
+            )
+        client = OpenAI(api_key=api_key)
+
+    available_stores = client.vector_stores.list().data
+    resolved_ids: list[str] = []
+
+    for store in requested_stores:
+        match = next(
+            (vs.id for vs in available_stores if vs.name == store),
+            None,
+        )
+        if match is None:
+            raise ValueError(f"Vector store '{store}' not found.")
+        if match not in resolved_ids:
+            resolved_ids.append(match)
+
+    file_search_tool = next(
+        (tool for tool in response._tools if tool.get("type") == "file_search"),
+        None,
+    )
+
+    if file_search_tool is None:
+        response._tools.append(
+            {"type": "file_search", "vector_store_ids": resolved_ids}
+        )
+        return resolved_ids
+
+    existing_ids = ensure_list(file_search_tool.get("vector_store_ids", []))
+    combined_ids = existing_ids.copy()
+    for vector_store_id in resolved_ids:
+        if vector_store_id not in combined_ids:
+            combined_ids.append(vector_store_id)
+    file_search_tool["vector_store_ids"] = combined_ids
+    return combined_ids
+
+
+__all__ = ["attach_vector_store"]
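Note: ``attach_vector_store`` resolves names to IDs via ``client.vector_stores.list()`` and mutates the response's private ``_tools`` list in place, de-duplicating against any existing ``file_search`` tool. A minimal sketch, assuming a store named "product-docs" already exists in the account and ``CopilotResponse`` is a hypothetical ``BaseResponse`` subclass:

    from openai_sdk_helpers import attach_vector_store

    response = CopilotResponse()
    ids = attach_vector_store(response, "product-docs")
    # response._tools now includes {"type": "file_search", "vector_store_ids": ids}
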
@@ -0,0 +1,13 @@
+"""Streamlit app utilities for the config-driven chat interface."""
+
+from .configuration import (
+    StreamlitAppConfig,
+    _load_configuration,
+    load_app_config,
+)
+
+__all__ = [
+    "StreamlitAppConfig",
+    "_load_configuration",
+    "load_app_config",
+]