openai-sdk-helpers 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. openai_sdk_helpers/__init__.py +85 -10
  2. openai_sdk_helpers/agent/__init__.py +8 -4
  3. openai_sdk_helpers/agent/base.py +81 -46
  4. openai_sdk_helpers/agent/config.py +6 -4
  5. openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
  6. openai_sdk_helpers/agent/prompt_utils.py +7 -1
  7. openai_sdk_helpers/agent/runner.py +67 -141
  8. openai_sdk_helpers/agent/search/__init__.py +33 -0
  9. openai_sdk_helpers/agent/search/base.py +297 -0
  10. openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
  11. openai_sdk_helpers/agent/{web_search.py → search/web.py} +82 -162
  12. openai_sdk_helpers/agent/summarizer.py +29 -8
  13. openai_sdk_helpers/agent/translator.py +40 -13
  14. openai_sdk_helpers/agent/validation.py +32 -8
  15. openai_sdk_helpers/async_utils.py +132 -0
  16. openai_sdk_helpers/config.py +74 -36
  17. openai_sdk_helpers/context_manager.py +241 -0
  18. openai_sdk_helpers/enums/__init__.py +9 -1
  19. openai_sdk_helpers/enums/base.py +67 -8
  20. openai_sdk_helpers/environment.py +33 -6
  21. openai_sdk_helpers/errors.py +133 -0
  22. openai_sdk_helpers/logging_config.py +105 -0
  23. openai_sdk_helpers/prompt/__init__.py +10 -71
  24. openai_sdk_helpers/prompt/base.py +172 -0
  25. openai_sdk_helpers/response/__init__.py +37 -5
  26. openai_sdk_helpers/response/base.py +427 -189
  27. openai_sdk_helpers/response/config.py +176 -0
  28. openai_sdk_helpers/response/messages.py +104 -40
  29. openai_sdk_helpers/response/runner.py +79 -35
  30. openai_sdk_helpers/response/tool_call.py +75 -12
  31. openai_sdk_helpers/response/vector_store.py +29 -16
  32. openai_sdk_helpers/retry.py +175 -0
  33. openai_sdk_helpers/streamlit_app/__init__.py +30 -0
  34. openai_sdk_helpers/streamlit_app/app.py +345 -0
  35. openai_sdk_helpers/streamlit_app/config.py +502 -0
  36. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +68 -0
  37. openai_sdk_helpers/structure/__init__.py +69 -3
  38. openai_sdk_helpers/structure/agent_blueprint.py +82 -19
  39. openai_sdk_helpers/structure/base.py +245 -91
  40. openai_sdk_helpers/structure/plan/__init__.py +15 -1
  41. openai_sdk_helpers/structure/plan/enum.py +41 -5
  42. openai_sdk_helpers/structure/plan/plan.py +101 -45
  43. openai_sdk_helpers/structure/plan/task.py +38 -6
  44. openai_sdk_helpers/structure/prompt.py +21 -2
  45. openai_sdk_helpers/structure/responses.py +52 -11
  46. openai_sdk_helpers/structure/summary.py +55 -7
  47. openai_sdk_helpers/structure/validation.py +34 -6
  48. openai_sdk_helpers/structure/vector_search.py +132 -18
  49. openai_sdk_helpers/structure/web_search.py +128 -12
  50. openai_sdk_helpers/types.py +57 -0
  51. openai_sdk_helpers/utils/__init__.py +32 -1
  52. openai_sdk_helpers/utils/core.py +200 -32
  53. openai_sdk_helpers/validation.py +302 -0
  54. openai_sdk_helpers/vector_storage/__init__.py +21 -1
  55. openai_sdk_helpers/vector_storage/cleanup.py +25 -13
  56. openai_sdk_helpers/vector_storage/storage.py +124 -66
  57. openai_sdk_helpers/vector_storage/types.py +20 -19
  58. openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
  59. openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
  60. openai_sdk_helpers-0.0.7.dist-info/METADATA +0 -193
  61. openai_sdk_helpers-0.0.7.dist-info/RECORD +0 -51
  62. {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
  63. {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,345 @@
1
+ """Configuration-driven Streamlit chat application.
2
+
3
+ This module implements a complete Streamlit chat interface that loads its
4
+ configuration from a Python module. It handles conversation state, message
5
+ rendering, response execution, and resource cleanup.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import json
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ import streamlit as st
15
+ from dotenv import load_dotenv
16
+
17
+ load_dotenv()
18
+
19
+ from openai_sdk_helpers.response import BaseResponse, attach_vector_store
20
+ from openai_sdk_helpers.streamlit_app import (
21
+ StreamlitAppConfig,
22
+ _load_configuration,
23
+ )
24
+ from openai_sdk_helpers.structure.base import BaseStructure
25
+ from openai_sdk_helpers.utils import coerce_jsonable, ensure_list, log
26
+
27
+
28
def _extract_assistant_text(response: BaseResponse[Any]) -> str:
    """Pull displayable text from the newest assistant or tool message.

    Looks up the most recent assistant message (falling back to the most
    recent tool message) and joins any text fragments it carries.

    Parameters
    ----------
    response : BaseResponse[Any]
        Active response session whose message history is inspected.

    Returns
    -------
    str
        Text fragments joined with blank lines, or an empty string when
        no displayable text is available.

    Examples
    --------
    >>> text = _extract_assistant_text(response)
    >>> print(text)
    """
    latest = response.get_last_assistant_message() or response.get_last_tool_message()
    if latest is None:
        return ""

    payload = getattr(latest.content, "content", None)
    if payload is None:
        return ""

    # Each part may expose `.text.value`; collect only non-empty values.
    fragments = [
        value
        for part in ensure_list(payload)
        if (value := getattr(getattr(part, "text", None), "value", None))
    ]
    # join on an empty list yields "" — same result as the explicit fallback.
    return "\n\n".join(fragments)
65
+
66
+
67
def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
    """Produce human-readable text for the chat transcript.

    Converts the parsed run result into display text: structured outputs
    use their own printer, dictionaries are pretty-printed as JSON, and
    anything else truthy is stringified.

    Parameters
    ----------
    result : Any
        Parsed result from BaseResponse.run_sync.
    response : BaseResponse[Any]
        Response instance containing message history.

    Returns
    -------
    str
        Display-ready summary text for the chat transcript.

    Notes
    -----
    When the result itself is empty or falsy, falls back to the latest
    assistant text in the message history, then to a fixed placeholder.
    """
    if isinstance(result, BaseStructure):
        return result.print()
    if isinstance(result, dict):
        return json.dumps(result, indent=2)
    if result:
        return str(result)
    return _extract_assistant_text(response) or "No response returned."
102
+
103
+
104
def _build_raw_output(result: Any, response: BaseResponse[Any]) -> dict[str, Any]:
    """Build the raw JSON payload shown in the expandable transcript section.

    Parameters
    ----------
    result : Any
        Parsed result from the response execution.
    response : BaseResponse[Any]
        Response session with complete message history.

    Returns
    -------
    dict[str, Any]
        Mapping with 'parsed' data and 'conversation' messages.

    Examples
    --------
    >>> raw = _build_raw_output(result, response)
    >>> raw.keys()
    dict_keys(['parsed', 'conversation'])
    """
    parsed = coerce_jsonable(result)
    conversation = response.messages.to_json()
    return {"parsed": parsed, "conversation": conversation}
132
+
133
+
134
def _get_response_instance(config: StreamlitAppConfig) -> BaseResponse[Any]:
    """Return the session's BaseResponse, creating and caching it on demand.

    Reuses the instance stored in ``st.session_state['response_instance']``
    when present; otherwise builds one from the configuration, applies the
    vector-store attachments and cleanup flags, and caches it.

    Parameters
    ----------
    config : StreamlitAppConfig
        Loaded configuration with response handler definition.

    Returns
    -------
    BaseResponse[Any]
        Active response instance for the current Streamlit session.

    Raises
    ------
    TypeError
        If the configured response cannot produce a BaseResponse.

    Notes
    -----
    Caching in session state keeps conversation state alive across
    Streamlit reruns.
    """
    cached = st.session_state.get("response_instance")
    if isinstance(cached, BaseResponse):
        return cached

    response = config.create_response()

    if config.preserve_vector_stores:
        # Opt out of automatic vector-store teardown so the stores
        # survive beyond this chat session.
        response._cleanup_system_vector_storage = False
        response._cleanup_user_vector_storage = False

    stores = config.normalized_vector_stores()
    if stores:
        attach_vector_store(response=response, vector_stores=stores)

    st.session_state["response_instance"] = response
    return response
178
+
179
+
180
def _reset_chat(close_response: bool = True) -> None:
    """Clear conversation and optionally close the response session.

    Saves the current conversation to disk, closes the response to clean
    up resources, and clears the chat history from session state.

    Parameters
    ----------
    close_response : bool, default True
        Whether to call close() on the cached response instance,
        triggering resource cleanup.

    Notes
    -----
    This function mutates st.session_state in-place, clearing the
    chat_history and response_instance keys.
    """
    response = st.session_state.get("response_instance")
    if close_response and isinstance(response, BaseResponse):
        # Ensure the target directory exists before saving; otherwise
        # response.save would fail on a fresh checkout with no ./data dir.
        # (NOTE(review): assumes save() does not create parents — confirm.)
        Path("./data").mkdir(parents=True, exist_ok=True)
        filepath = f"./data/{response.name}.{response.uuid}.json"
        response.save(filepath)
        response.close()
    st.session_state["chat_history"] = []
    st.session_state.pop("response_instance", None)
204
+
205
+
206
def _init_session_state() -> None:
    """Initialize Streamlit session state for chat functionality.

    Ensures ``chat_history`` exists in session state so the conversation
    persists across Streamlit reruns.

    Notes
    -----
    Call early in the app lifecycle, before any chat UI is rendered.
    """
    st.session_state.setdefault("chat_history", [])
219
+
220
+
221
def _render_chat_history() -> None:
    """Render the conversation transcript stored in session state.

    Walks ``chat_history`` and draws each entry with role-appropriate
    styling. Assistant entries show their summary plus an expandable
    raw-output section when one was recorded.

    Notes
    -----
    Uses Streamlit's chat_message context manager for role-based
    message styling.
    """
    for entry in st.session_state.get("chat_history", []):
        role = entry.get("role", "assistant")
        with st.chat_message(role):
            if role != "assistant":
                st.markdown(entry.get("content", ""))
                continue
            st.markdown(entry.get("summary", ""))
            raw = entry.get("raw")
            if raw is not None:
                with st.expander("Raw output", expanded=False):
                    st.json(raw)
244
+
245
+
246
def _handle_user_message(prompt: str, config: StreamlitAppConfig) -> None:
    """Process user input and generate assistant response.

    Appends the user message to chat history, executes the response
    handler, and adds the assistant's reply to the conversation.
    Handles errors gracefully by displaying them in the UI.

    Parameters
    ----------
    prompt : str
        User-entered text to send to the assistant.
    config : StreamlitAppConfig
        Loaded configuration with response handler definition.

    Notes
    -----
    Errors during response execution are caught and displayed in the
    chat transcript rather than crashing the application. The function
    triggers a Streamlit rerun after successful response generation.

    ``st.rerun()`` is deliberately called OUTSIDE the try/except:
    Streamlit signals a rerun by raising ``RerunException`` (a subclass
    of ``Exception``), so keeping it inside the ``except Exception``
    block would swallow the rerun and record it as an error.
    """
    st.session_state["chat_history"].append({"role": "user", "content": prompt})
    try:
        response = _get_response_instance(config)
    except Exception as exc:  # pragma: no cover - surfaced in UI
        st.error(f"Failed to start response session: {exc}")
        return

    try:
        with st.spinner("Thinking..."):
            result = response.run_sync(content=prompt)
        summary = _render_summary(result, response)
        raw_output = _build_raw_output(result, response)
        st.session_state["chat_history"].append(
            {"role": "assistant", "summary": summary, "raw": raw_output}
        )
    except Exception as exc:  # pragma: no cover - surfaced in UI
        st.session_state["chat_history"].append(
            {
                "role": "assistant",
                "summary": f"Encountered an error: {exc}",
                "raw": {"error": str(exc)},
            }
        )
        st.error("Something went wrong, but your chat history is still here.")
        return

    # Safe location for rerun: its control-flow exception is no longer
    # intercepted by the error handler above.
    st.rerun()
291
+
292
+
293
def main(config_path: Path) -> None:
    """Run the configuration-driven Streamlit chat application.

    Entry point for the Streamlit app that loads configuration, sets up
    the UI, manages session state, and handles user interactions.

    Parameters
    ----------
    config_path : Path
        Filesystem location of the configuration module.

    Notes
    -----
    Handles the complete application lifecycle: configuration loading,
    UI rendering, and chat interactions.

    Examples
    --------
    >>> from pathlib import Path
    >>> main(Path("./my_config.py"))
    """
    config = _load_configuration(config_path)
    st.set_page_config(page_title=config.display_title, layout="wide")
    _init_session_state()

    st.title(config.display_title)
    # Optional captions: description first, then the configured model.
    if config.description:
        st.caption(config.description)
    if config.model:
        st.caption(f"Model: {config.model}")

    close_col, _ = st.columns([1, 5])
    with close_col:
        if st.button("Close chat", type="secondary"):
            _reset_chat()
            st.toast("Chat closed.")

    _render_chat_history()

    if prompt := st.chat_input("Message the assistant"):
        _handle_user_message(prompt, config)
336
+
337
+
338
if __name__ == "__main__":
    import sys

    if len(sys.argv) != 2:
        # Usage errors belong on stderr so stdout stays clean for pipelines.
        print("Usage: python app.py <config_path>", file=sys.stderr)
        sys.exit(1)
    main(Path(sys.argv[1]))