openai-sdk-helpers 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. openai_sdk_helpers/__init__.py +66 -2
  2. openai_sdk_helpers/agent/__init__.py +8 -4
  3. openai_sdk_helpers/agent/base.py +80 -45
  4. openai_sdk_helpers/agent/config.py +6 -4
  5. openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
  6. openai_sdk_helpers/agent/prompt_utils.py +7 -1
  7. openai_sdk_helpers/agent/runner.py +67 -141
  8. openai_sdk_helpers/agent/search/__init__.py +33 -0
  9. openai_sdk_helpers/agent/search/base.py +297 -0
  10. openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
  11. openai_sdk_helpers/agent/{web_search.py → search/web.py} +77 -156
  12. openai_sdk_helpers/agent/summarizer.py +29 -8
  13. openai_sdk_helpers/agent/translator.py +40 -13
  14. openai_sdk_helpers/agent/validation.py +32 -8
  15. openai_sdk_helpers/async_utils.py +132 -0
  16. openai_sdk_helpers/config.py +74 -36
  17. openai_sdk_helpers/context_manager.py +241 -0
  18. openai_sdk_helpers/enums/__init__.py +9 -1
  19. openai_sdk_helpers/enums/base.py +67 -8
  20. openai_sdk_helpers/environment.py +33 -6
  21. openai_sdk_helpers/errors.py +133 -0
  22. openai_sdk_helpers/logging_config.py +105 -0
  23. openai_sdk_helpers/prompt/__init__.py +10 -71
  24. openai_sdk_helpers/prompt/base.py +172 -0
  25. openai_sdk_helpers/response/__init__.py +35 -3
  26. openai_sdk_helpers/response/base.py +363 -210
  27. openai_sdk_helpers/response/config.py +176 -0
  28. openai_sdk_helpers/response/messages.py +56 -40
  29. openai_sdk_helpers/response/runner.py +77 -33
  30. openai_sdk_helpers/response/tool_call.py +49 -25
  31. openai_sdk_helpers/response/vector_store.py +27 -14
  32. openai_sdk_helpers/retry.py +175 -0
  33. openai_sdk_helpers/streamlit_app/__init__.py +19 -2
  34. openai_sdk_helpers/streamlit_app/app.py +114 -39
  35. openai_sdk_helpers/streamlit_app/config.py +502 -0
  36. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +5 -6
  37. openai_sdk_helpers/structure/__init__.py +69 -3
  38. openai_sdk_helpers/structure/agent_blueprint.py +82 -19
  39. openai_sdk_helpers/structure/base.py +208 -93
  40. openai_sdk_helpers/structure/plan/__init__.py +15 -1
  41. openai_sdk_helpers/structure/plan/enum.py +41 -5
  42. openai_sdk_helpers/structure/plan/plan.py +101 -45
  43. openai_sdk_helpers/structure/plan/task.py +38 -6
  44. openai_sdk_helpers/structure/prompt.py +21 -2
  45. openai_sdk_helpers/structure/responses.py +52 -11
  46. openai_sdk_helpers/structure/summary.py +55 -7
  47. openai_sdk_helpers/structure/validation.py +34 -6
  48. openai_sdk_helpers/structure/vector_search.py +132 -18
  49. openai_sdk_helpers/structure/web_search.py +125 -13
  50. openai_sdk_helpers/types.py +57 -0
  51. openai_sdk_helpers/utils/__init__.py +30 -1
  52. openai_sdk_helpers/utils/core.py +168 -34
  53. openai_sdk_helpers/validation.py +302 -0
  54. openai_sdk_helpers/vector_storage/__init__.py +21 -1
  55. openai_sdk_helpers/vector_storage/cleanup.py +25 -13
  56. openai_sdk_helpers/vector_storage/storage.py +123 -64
  57. openai_sdk_helpers/vector_storage/types.py +20 -19
  58. openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
  59. openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
  60. openai_sdk_helpers/streamlit_app/configuration.py +0 -324
  61. openai_sdk_helpers-0.0.8.dist-info/METADATA +0 -194
  62. openai_sdk_helpers-0.0.8.dist-info/RECORD +0 -55
  63. {openai_sdk_helpers-0.0.8.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
  64. {openai_sdk_helpers-0.0.8.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/response/vector_store.py
@@ -1,8 +1,12 @@
-"""Helpers for attaching vector stores to responses."""
+"""Vector store attachment utilities for responses.
+
+This module provides functions for attaching named vector stores to response
+instances, enabling file search capabilities through the OpenAI API.
+"""
 
 from __future__ import annotations
 
-from typing import Any, Optional, Sequence
+from typing import Any, Sequence
 
 from openai import OpenAI
 
@@ -13,30 +17,39 @@ from .base import BaseResponse
 def attach_vector_store(
     response: BaseResponse[Any],
     vector_stores: str | Sequence[str],
-    api_key: Optional[str] = None,
+    api_key: str | None = None,
 ) -> list[str]:
-    """Attach vector stores to a response ``file_search`` tool.
+    """Attach named vector stores to a response's file_search tool.
+
+    Resolves vector store names to IDs via the OpenAI API and configures
+    the response's file_search tool to use them. Creates the file_search
+    tool if it doesn't exist, or updates it to include additional stores.
 
     Parameters
     ----------
-    response
-        Response instance whose tool configuration is updated.
-    vector_stores
-        Single vector store name or a sequence of names to attach.
-    api_key : str, optional
-        API key used when the response does not already have a client. Default
-        ``None``.
+    response : BaseResponse[Any]
+        Response instance whose tool configuration will be updated.
+    vector_stores : str or Sequence[str]
+        Single vector store name or sequence of names to attach.
+    api_key : str or None, default None
+        API key for OpenAI client. If None, uses the response's client.
 
     Returns
     -------
     list[str]
-        Ordered list of vector store IDs applied to the ``file_search`` tool.
+        Ordered list of vector store IDs attached to the file_search tool.
 
     Raises
     ------
     ValueError
-        If a vector store cannot be resolved or no API key is available when
-        required.
+        If a vector store name cannot be resolved to an ID.
+        If no API key is available and the response has no client.
+
+    Examples
+    --------
+    >>> from openai_sdk_helpers.response import attach_vector_store
+    >>> ids = attach_vector_store(response, "knowledge_base")
+    >>> ids = attach_vector_store(response, ["docs", "kb"], api_key="sk-...")
     """
     requested_stores = ensure_list(vector_stores)
 
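Illustrative note: a minimal usage sketch of the updated helper. It assumes an already-constructed BaseResponse named response; the function, its parameters, and the ValueError conditions are taken from the docstring above.

from openai_sdk_helpers.response import attach_vector_store

# Resolve a single store name to an ID and attach it to the
# response's file_search tool (creating the tool if needed).
ids = attach_vector_store(response, "knowledge_base")

# Attach several stores, passing an explicit key when the response has no
# client of its own; unresolvable names raise ValueError per the docstring.
ids = attach_vector_store(response, ["docs", "kb"], api_key="sk-...")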
openai_sdk_helpers/retry.py (new file)
@@ -0,0 +1,175 @@
+"""Retry decorators with exponential backoff for API operations.
+
+Provides decorators for retrying async and sync functions with
+exponential backoff and jitter when rate limiting or transient
+errors occur.
+"""
+
+import asyncio
+import logging
+import random
+import time
+from functools import wraps
+from typing import Any, Callable, ParamSpec, TypeVar
+
+from openai import APIError, RateLimitError
+
+from openai_sdk_helpers.errors import AsyncExecutionError
+from openai_sdk_helpers.utils.core import log
+
+P = ParamSpec("P")
+T = TypeVar("T")
+
+# Default retry configuration constants
+DEFAULT_MAX_RETRIES = 3
+DEFAULT_BASE_DELAY = 1.0
+DEFAULT_MAX_DELAY = 60.0
+
+# HTTP status codes for transient errors
+TRANSIENT_HTTP_STATUS_CODES = frozenset({408, 429, 500, 502, 503})
+
+
+def with_exponential_backoff(
+    max_retries: int = DEFAULT_MAX_RETRIES,
+    base_delay: float = DEFAULT_BASE_DELAY,
+    max_delay: float = DEFAULT_MAX_DELAY,
+) -> Callable[[Callable[P, T]], Callable[P, T]]:
+    """Decorate functions with exponential backoff on transient errors.
+
+    Retries on RateLimitError or transient API errors (5xx, 408, 429).
+    Uses exponential backoff with jitter to avoid thundering herd.
+
+    Parameters
+    ----------
+    max_retries : int
+        Maximum number of retry attempts (total attempts = max_retries + 1).
+        Default is 3.
+    base_delay : float
+        Initial delay in seconds before first retry. Default is 1.0.
+    max_delay : float
+        Maximum delay in seconds between retries. Default is 60.0.
+
+    Returns
+    -------
+    Callable
+        Decorator function.
+
+    Examples
+    --------
+    >>> @with_exponential_backoff(max_retries=3, base_delay=1.0)
+    ... def call_api(query: str) -> str:
+    ...     # API call that may fail with rate limiting
+    ...     return client.call(query)
+    """
+
+    def decorator(func: Callable[P, T]) -> Callable[P, T]:
+        """Apply retry logic to function."""
+        if asyncio.iscoroutinefunction(func):
+
+            @wraps(func)
+            async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
+                """Async wrapper with retry logic."""
+                last_exc: Exception | None = None
+                for attempt in range(max_retries + 1):
+                    try:
+                        return await func(*args, **kwargs)
+                    except RateLimitError as exc:
+                        last_exc = exc
+                        if attempt >= max_retries:
+                            raise
+                        delay = min(
+                            base_delay * (2**attempt) + random.uniform(0, 1),
+                            max_delay,
+                        )
+                        log(
+                            f"Rate limited on {func.__name__}, retrying in "
+                            f"{delay:.2f}s (attempt {attempt + 1}/{max_retries + 1})",
+                            level=logging.WARNING,
+                        )
+                        await asyncio.sleep(delay)
+                    except APIError as exc:
+                        last_exc = exc
+                        status_code: int | None = getattr(exc, "status_code", None)
+                        # Only retry on transient errors
+                        if (
+                            not status_code
+                            or status_code not in TRANSIENT_HTTP_STATUS_CODES
+                        ):
+                            raise
+                        if attempt >= max_retries:
+                            raise
+                        delay = min(
+                            base_delay * (2**attempt),
+                            max_delay,
+                        )
+                        log(
+                            f"Transient API error on {func.__name__}: "
+                            f"{status_code}, retrying in {delay:.2f}s "
+                            f"(attempt {attempt + 1}/{max_retries + 1})",
+                            level=logging.WARNING,
+                        )
+                        await asyncio.sleep(delay)
+
+                # Should never reach here, but handle edge case
+                if last_exc:
+                    raise last_exc
+                raise AsyncExecutionError(
+                    f"Unexpected state in {func.__name__} after retries"
+                )
+
+            return async_wrapper  # type: ignore
+
+        @wraps(func)
+        def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
+            """Sync wrapper with retry logic."""
+            last_exc: Exception | None = None
+            for attempt in range(max_retries + 1):
+                try:
+                    return func(*args, **kwargs)
+                except RateLimitError as exc:
+                    last_exc = exc
+                    if attempt >= max_retries:
+                        raise
+                    delay = min(
+                        base_delay * (2**attempt) + random.uniform(0, 1),
+                        max_delay,
+                    )
+                    log(
+                        f"Rate limited on {func.__name__}, retrying in "
+                        f"{delay:.2f}s (attempt {attempt + 1}/{max_retries + 1})",
+                        level=logging.WARNING,
+                    )
+                    time.sleep(delay)
+                except APIError as exc:
+                    last_exc = exc
+                    status_code: int | None = getattr(exc, "status_code", None)
+                    # Only retry on transient errors
+                    if (
+                        not status_code
+                        or status_code not in TRANSIENT_HTTP_STATUS_CODES
+                    ):
+                        raise
+                    if attempt >= max_retries:
+                        raise
+                    delay = min(
+                        base_delay * (2**attempt),
+                        max_delay,
+                    )
+                    log(
+                        f"Transient API error on {func.__name__}: "
+                        f"{status_code}, retrying in {delay:.2f}s "
+                        f"(attempt {attempt + 1}/{max_retries + 1})",
+                        level=logging.WARNING,
+                    )
+                    time.sleep(delay)
+
+            # Should never reach here, but handle edge case
+            if last_exc:
+                raise last_exc
+            raise AsyncExecutionError(
+                f"Unexpected state in {func.__name__} after retries"
+            )
+
+        return sync_wrapper  # type: ignore
+
+    return decorator
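Illustrative note: a short sketch of the new decorator wrapping an async OpenAI call. The decorator, its defaults, and the retried errors (RateLimitError, plus APIError with status 408/429/5xx) come from retry.py above; the fetch_completion coroutine and the specific client call are assumptions for the example.

import asyncio

from openai import AsyncOpenAI
from openai_sdk_helpers.retry import with_exponential_backoff

client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment


@with_exponential_backoff(max_retries=5, base_delay=0.5, max_delay=30.0)
async def fetch_completion(prompt: str) -> str:
    # Rate limits and transient API errors are retried with exponential
    # backoff (jitter is added on the rate-limit path); other errors propagate.
    result = await client.responses.create(model="gpt-4o-mini", input=prompt)
    return result.output_text


print(asyncio.run(fetch_completion("Summarize the release notes.")))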
openai_sdk_helpers/streamlit_app/__init__.py
@@ -1,6 +1,23 @@
-"""Streamlit app utilities for the config-driven chat interface."""
+"""Streamlit application utilities for configuration-driven chat interfaces.
+
+This module provides configuration management and loading utilities for building
+Streamlit-based chat applications powered by OpenAI response handlers. It enables
+rapid deployment of conversational AI interfaces with minimal boilerplate.
+
+Classes
+-------
+StreamlitAppConfig
+    Validated configuration for Streamlit chat applications.
+
+Functions
+---------
+load_app_config
+    Load and validate configuration from a Python module.
+_load_configuration
+    Load configuration with user-friendly error handling for Streamlit UI.
+"""
 
-from .configuration import (
+from .config import (
     StreamlitAppConfig,
     _load_configuration,
     load_app_config,
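Because the re-exports above are unchanged, the configuration.py → config.py rename is transparent to downstream code; for example, this import works on both 0.0.8 and 0.0.9:

from openai_sdk_helpers.streamlit_app import StreamlitAppConfig, load_app_config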
openai_sdk_helpers/streamlit_app/app.py
@@ -1,10 +1,15 @@
-"""Streamlit chat application driven by a developer configuration."""
+"""Configuration-driven Streamlit chat application.
+
+This module implements a complete Streamlit chat interface that loads its
+configuration from a Python module. It handles conversation state, message
+rendering, response execution, and resource cleanup.
+"""
 
 from __future__ import annotations
 
 import json
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any
 
 import streamlit as st
 from dotenv import load_dotenv
@@ -17,21 +22,29 @@ from openai_sdk_helpers.streamlit_app import (
     _load_configuration,
 )
 from openai_sdk_helpers.structure.base import BaseStructure
-from openai_sdk_helpers.utils import ensure_list, coerce_jsonable, log
+from openai_sdk_helpers.utils import coerce_jsonable, ensure_list, log
 
 
 def _extract_assistant_text(response: BaseResponse[Any]) -> str:
-    """Return the latest assistant message as a friendly string.
+    """Extract the latest assistant message as readable text.
+
+    Searches the response's message history for the most recent assistant
+    or tool message and extracts displayable text content.
 
     Parameters
     ----------
     response : BaseResponse[Any]
-        Active response session containing message history.
+        Active response session with message history.
 
     Returns
     -------
     str
-        Concatenated assistant text, or an empty string when unavailable.
+        Concatenated assistant text, or empty string if unavailable.
+
+    Examples
+    --------
+    >>> text = _extract_assistant_text(response)
+    >>> print(text)
     """
     message = response.get_last_assistant_message() or response.get_last_tool_message()
     if message is None:
@@ -41,7 +54,7 @@ def _extract_assistant_text(response: BaseResponse[Any]) -> str:
     if content is None:
         return ""
 
-    text_parts: List[str] = []
+    text_parts: list[str] = []
     for part in ensure_list(content):
         text_value = getattr(getattr(part, "text", None), "value", None)
         if text_value:
@@ -52,19 +65,28 @@
 
 
 def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
-    """Generate the assistant-facing summary shown in the transcript.
+    """Generate display text for the chat transcript.
+
+    Converts the response result into a human-readable format suitable
+    for display in the Streamlit chat interface. Handles structured
+    outputs, dictionaries, and raw text.
 
     Parameters
     ----------
     result : Any
-        Parsed result returned from ``BaseResponse.run_sync``.
+        Parsed result from BaseResponse.run_sync.
     response : BaseResponse[Any]
-        Response instance containing the latest assistant message.
+        Response instance containing message history.
 
     Returns
     -------
     str
         Display-ready summary text for the chat transcript.
+
+    Notes
+    -----
+    Falls back to extracting assistant text from message history if
+    the result cannot be formatted directly.
     """
     if isinstance(result, BaseStructure):
         return result.print()
@@ -79,20 +101,29 @@ def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
         return "No response returned."
 
 
-def _build_raw_output(result: Any, response: BaseResponse[Any]) -> Dict[str, Any]:
-    """Assemble the raw payload shown under the expandable transcript section.
+def _build_raw_output(result: Any, response: BaseResponse[Any]) -> dict[str, Any]:
+    """Assemble raw JSON payload for the expandable transcript section.
+
+    Creates a structured dictionary containing both the parsed result
+    and the complete conversation history for debugging and inspection.
 
     Parameters
     ----------
     result : Any
-        Parsed result returned from the response instance.
+        Parsed result from the response execution.
     response : BaseResponse[Any]
-        Response session containing message history.
+        Response session with complete message history.
 
     Returns
    -------
     dict[str, Any]
-        Mapping that includes parsed data and raw conversation messages.
+        Mapping with 'parsed' data and 'conversation' messages.
+
+    Examples
+    --------
+    >>> raw = _build_raw_output(result, response)
+    >>> raw.keys()
+    dict_keys(['parsed', 'conversation'])
     """
     return {
         "parsed": coerce_jsonable(result),
@@ -101,22 +132,31 @@ def _build_raw_output(result: Any, response: BaseResponse[Any]) -> Dict[str, Any
     }
 
 def _get_response_instance(config: StreamlitAppConfig) -> BaseResponse[Any]:
-    """Instantiate and cache the configured :class:`BaseResponse`.
+    """Instantiate and cache the configured BaseResponse.
+
+    Creates a new response instance from the configuration if not already
+    cached in session state. Applies vector store attachments and cleanup
+    settings based on configuration.
 
     Parameters
     ----------
     config : StreamlitAppConfig
-        Loaded configuration containing the response definition.
+        Loaded configuration with response handler definition.
 
     Returns
     -------
     BaseResponse[Any]
-        Active response instance for the current session.
+        Active response instance for the current Streamlit session.
 
     Raises
     ------
     TypeError
-        If the configured ``response`` cannot produce ``BaseResponse``.
+        If the configured response cannot produce a BaseResponse.
+
+    Notes
+    -----
+    The response instance is cached in st.session_state['response_instance']
+    to maintain conversation state across Streamlit reruns.
     """
     if "response_instance" in st.session_state:
         cached = st.session_state["response_instance"]
@@ -138,17 +178,21 @@
 
 
 def _reset_chat(close_response: bool = True) -> None:
-    """Clear the conversation and optionally close the response session.
+    """Clear conversation and optionally close the response session.
+
+    Saves the current conversation to disk, closes the response to clean
+    up resources, and clears the chat history from session state.
 
     Parameters
     ----------
-    close_response : bool, default=True
-        Whether to call ``close`` on the cached response instance.
-
-    Returns
-    -------
-    None
-        This function mutates ``st.session_state`` in-place.
+    close_response : bool, default True
+        Whether to call close() on the cached response instance,
+        triggering resource cleanup.
+
+    Notes
+    -----
+    This function mutates st.session_state in-place, clearing the
+    chat_history and response_instance keys.
     """
     response = st.session_state.get("response_instance")
    if close_response and isinstance(response, BaseResponse):
@@ -160,12 +204,15 @@
 
 
 def _init_session_state() -> None:
-    """Prepare Streamlit session state containers.
+    """Initialize Streamlit session state for chat functionality.
 
-    Returns
-    -------
-    None
-        This function initializes chat-related session keys when absent.
+    Creates the chat_history list in session state if it doesn't exist,
+    enabling conversation persistence across Streamlit reruns.
+
+    Notes
+    -----
+    This function should be called early in the app lifecycle to ensure
+    session state is properly initialized before rendering chat UI.
     """
     if "chat_history" not in st.session_state:
         st.session_state["chat_history"] = []
@@ -174,10 +221,14 @@
 def _render_chat_history() -> None:
     """Display the conversation transcript from session state.
 
-    Returns
-    -------
-    None
-        Renders chat messages in the current Streamlit session.
+    Iterates through chat_history in session state and renders each
+    message with appropriate formatting. Assistant messages include
+    an expandable raw output section.
+
+    Notes
+    -----
+    Uses Streamlit's chat_message context manager for role-based
+    message styling.
     """
     for message in st.session_state.get("chat_history", []):
         role = message.get("role", "assistant")
@@ -193,14 +244,24 @@
 
 
 def _handle_user_message(prompt: str, config: StreamlitAppConfig) -> None:
-    """Append a user prompt and stream the assistant reply into the transcript.
+    """Process user input and generate assistant response.
+
+    Appends the user message to chat history, executes the response
+    handler, and adds the assistant's reply to the conversation.
+    Handles errors gracefully by displaying them in the UI.
 
     Parameters
     ----------
     prompt : str
         User-entered text to send to the assistant.
     config : StreamlitAppConfig
-        Loaded configuration containing the response definition.
+        Loaded configuration with response handler definition.
+
+    Notes
+    -----
+    Errors during response execution are caught and displayed in the
+    chat transcript rather than crashing the application. The function
+    triggers a Streamlit rerun after successful response generation.
     """
     st.session_state["chat_history"].append({"role": "user", "content": prompt})
     try:
@@ -230,12 +291,26 @@
 
 
 def main(config_path: Path) -> None:
-    """Run the config-driven Streamlit chat app.
+    """Run the configuration-driven Streamlit chat application.
+
+    Entry point for the Streamlit app that loads configuration, sets up
+    the UI, manages session state, and handles user interactions.
 
     Parameters
     ----------
     config_path : Path
        Filesystem location of the configuration module.
+
+    Notes
+    -----
+    This function should be called as the entry point for the Streamlit
+    application. It handles the complete application lifecycle including
+    configuration loading, UI rendering, and chat interactions.
+
+    Examples
+    --------
+    >>> from pathlib import Path
+    >>> main(Path("./my_config.py"))
     """
     config = _load_configuration(config_path)
     st.set_page_config(page_title=config.display_title, layout="wide")
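Illustrative note: a minimal launcher for the config-driven app, following the main(Path(...)) example in the docstring above. The my_config.py module is hypothetical; its required contents are defined by StreamlitAppConfig in streamlit_app/config.py, which is not shown in this hunk.

# run_app.py
from pathlib import Path

from openai_sdk_helpers.streamlit_app.app import main

# Loads the configuration module, sets the page title from
# config.display_title, and renders the chat UI.
main(Path("./my_config.py"))

Run with: streamlit run run_app.py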