openai-sdk-helpers 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff shows the changes between package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- openai_sdk_helpers/__init__.py +19 -8
- openai_sdk_helpers/agent/base.py +1 -1
- openai_sdk_helpers/agent/web_search.py +5 -6
- openai_sdk_helpers/config.py +48 -2
- openai_sdk_helpers/response/__init__.py +4 -2
- openai_sdk_helpers/response/base.py +96 -7
- openai_sdk_helpers/response/messages.py +48 -0
- openai_sdk_helpers/response/runner.py +5 -5
- openai_sdk_helpers/response/tool_call.py +39 -0
- openai_sdk_helpers/response/vector_store.py +84 -0
- openai_sdk_helpers/streamlit_app/__init__.py +13 -0
- openai_sdk_helpers/streamlit_app/app.py +270 -0
- openai_sdk_helpers/streamlit_app/configuration.py +324 -0
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +69 -0
- openai_sdk_helpers/structure/base.py +40 -1
- openai_sdk_helpers/structure/web_search.py +4 -0
- openai_sdk_helpers/utils/__init__.py +15 -1
- openai_sdk_helpers/utils/core.py +127 -1
- openai_sdk_helpers/vector_storage/storage.py +80 -18
- {openai_sdk_helpers-0.0.6.dist-info → openai_sdk_helpers-0.0.8.dist-info}/METADATA +7 -3
- {openai_sdk_helpers-0.0.6.dist-info → openai_sdk_helpers-0.0.8.dist-info}/RECORD +23 -18
- {openai_sdk_helpers-0.0.6.dist-info → openai_sdk_helpers-0.0.8.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.0.6.dist-info → openai_sdk_helpers-0.0.8.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/streamlit_app/app.py (new file)
@@ -0,0 +1,270 @@
+"""Streamlit chat application driven by a developer configuration."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Any, Dict, List
+
+import streamlit as st
+from dotenv import load_dotenv
+
+load_dotenv()
+
+from openai_sdk_helpers.response import BaseResponse, attach_vector_store
+from openai_sdk_helpers.streamlit_app import (
+    StreamlitAppConfig,
+    _load_configuration,
+)
+from openai_sdk_helpers.structure.base import BaseStructure
+from openai_sdk_helpers.utils import ensure_list, coerce_jsonable, log
+
+
+def _extract_assistant_text(response: BaseResponse[Any]) -> str:
+    """Return the latest assistant message as a friendly string.
+
+    Parameters
+    ----------
+    response : BaseResponse[Any]
+        Active response session containing message history.
+
+    Returns
+    -------
+    str
+        Concatenated assistant text, or an empty string when unavailable.
+    """
+    message = response.get_last_assistant_message() or response.get_last_tool_message()
+    if message is None:
+        return ""
+
+    content = getattr(message.content, "content", None)
+    if content is None:
+        return ""
+
+    text_parts: List[str] = []
+    for part in ensure_list(content):
+        text_value = getattr(getattr(part, "text", None), "value", None)
+        if text_value:
+            text_parts.append(text_value)
+    if text_parts:
+        return "\n\n".join(text_parts)
+    return ""
+
+
+def _render_summary(result: Any, response: BaseResponse[Any]) -> str:
+    """Generate the assistant-facing summary shown in the transcript.
+
+    Parameters
+    ----------
+    result : Any
+        Parsed result returned from ``BaseResponse.run_sync``.
+    response : BaseResponse[Any]
+        Response instance containing the latest assistant message.
+
+    Returns
+    -------
+    str
+        Display-ready summary text for the chat transcript.
+    """
+    if isinstance(result, BaseStructure):
+        return result.print()
+    if isinstance(result, dict):
+        return json.dumps(result, indent=2)
+    if result:
+        return str(result)
+
+    fallback_text = _extract_assistant_text(response)
+    if fallback_text:
+        return fallback_text
+    return "No response returned."
+
+
+def _build_raw_output(result: Any, response: BaseResponse[Any]) -> Dict[str, Any]:
+    """Assemble the raw payload shown under the expandable transcript section.
+
+    Parameters
+    ----------
+    result : Any
+        Parsed result returned from the response instance.
+    response : BaseResponse[Any]
+        Response session containing message history.
+
+    Returns
+    -------
+    dict[str, Any]
+        Mapping that includes parsed data and raw conversation messages.
+    """
+    return {
+        "parsed": coerce_jsonable(result),
+        "conversation": response.messages.to_json(),
+    }
+
+
+def _get_response_instance(config: StreamlitAppConfig) -> BaseResponse[Any]:
+    """Instantiate and cache the configured :class:`BaseResponse`.
+
+    Parameters
+    ----------
+    config : StreamlitAppConfig
+        Loaded configuration containing the response definition.
+
+    Returns
+    -------
+    BaseResponse[Any]
+        Active response instance for the current session.
+
+    Raises
+    ------
+    TypeError
+        If the configured ``response`` cannot produce ``BaseResponse``.
+    """
+    if "response_instance" in st.session_state:
+        cached = st.session_state["response_instance"]
+        if isinstance(cached, BaseResponse):
+            return cached
+
+    response = config.create_response()
+
+    if config.preserve_vector_stores:
+        setattr(response, "_cleanup_system_vector_storage", False)
+        setattr(response, "_cleanup_user_vector_storage", False)
+
+    vector_stores = config.normalized_vector_stores()
+    if vector_stores:
+        attach_vector_store(response=response, vector_stores=vector_stores)
+
+    st.session_state["response_instance"] = response
+    return response
+
+
+def _reset_chat(close_response: bool = True) -> None:
+    """Clear the conversation and optionally close the response session.
+
+    Parameters
+    ----------
+    close_response : bool, default=True
+        Whether to call ``close`` on the cached response instance.
+
+    Returns
+    -------
+    None
+        This function mutates ``st.session_state`` in-place.
+    """
+    response = st.session_state.get("response_instance")
+    if close_response and isinstance(response, BaseResponse):
+        filepath = f"./data/{response.name}.{response.uuid}.json"
+        response.save(filepath)
+        response.close()
+    st.session_state["chat_history"] = []
+    st.session_state.pop("response_instance", None)
+
+
+def _init_session_state() -> None:
+    """Prepare Streamlit session state containers.
+
+    Returns
+    -------
+    None
+        This function initializes chat-related session keys when absent.
+    """
+    if "chat_history" not in st.session_state:
+        st.session_state["chat_history"] = []
+
+
+def _render_chat_history() -> None:
+    """Display the conversation transcript from session state.
+
+    Returns
+    -------
+    None
+        Renders chat messages in the current Streamlit session.
+    """
+    for message in st.session_state.get("chat_history", []):
+        role = message.get("role", "assistant")
+        with st.chat_message(role):
+            if role == "assistant":
+                st.markdown(message.get("summary", ""))
+                raw_output = message.get("raw")
+                if raw_output is not None:
+                    with st.expander("Raw output", expanded=False):
+                        st.json(raw_output)
+            else:
+                st.markdown(message.get("content", ""))
+
+
+def _handle_user_message(prompt: str, config: StreamlitAppConfig) -> None:
+    """Append a user prompt and stream the assistant reply into the transcript.
+
+    Parameters
+    ----------
+    prompt : str
+        User-entered text to send to the assistant.
+    config : StreamlitAppConfig
+        Loaded configuration containing the response definition.
+    """
+    st.session_state["chat_history"].append({"role": "user", "content": prompt})
+    try:
+        response = _get_response_instance(config)
+    except Exception as exc:  # pragma: no cover - surfaced in UI
+        st.error(f"Failed to start response session: {exc}")
+        return
+
+    try:
+        with st.spinner("Thinking..."):
+            result = response.run_sync(content=prompt)
+        summary = _render_summary(result, response)
+        raw_output = _build_raw_output(result, response)
+        st.session_state["chat_history"].append(
+            {"role": "assistant", "summary": summary, "raw": raw_output}
+        )
+        st.rerun()
+    except Exception as exc:  # pragma: no cover - surfaced in UI
+        st.session_state["chat_history"].append(
+            {
+                "role": "assistant",
+                "summary": f"Encountered an error: {exc}",
+                "raw": {"error": str(exc)},
+            }
+        )
+        st.error("Something went wrong, but your chat history is still here.")
+
+
+def main(config_path: Path) -> None:
+    """Run the config-driven Streamlit chat app.
+
+    Parameters
+    ----------
+    config_path : Path
+        Filesystem location of the configuration module.
+    """
+    config = _load_configuration(config_path)
+    st.set_page_config(page_title=config.display_title, layout="wide")
+    _init_session_state()
+
+    st.title(config.display_title)
+    if config.description:
+        st.caption(config.description)
+    if config.model:
+        st.caption(f"Model: {config.model}")
+
+    close_col, _ = st.columns([1, 5])
+    with close_col:
+        if st.button("Close chat", type="secondary"):
+            _reset_chat()
+            st.toast("Chat closed.")
+
+    _render_chat_history()
+
+    prompt = st.chat_input("Message the assistant")
+    if prompt:
+        _handle_user_message(prompt, config)
+
+
+if __name__ == "__main__":
+    import sys
+
+    if len(sys.argv) != 2:
+        print("Usage: python app.py <config_path>")
+        sys.exit(1)
+    config_path = Path(sys.argv[1])
+    main(config_path)
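The app above is entirely config-driven: `main()` loads a developer-authored module that defines `APP_CONFIG` and resolves a `BaseResponse` from it. A minimal sketch of such a module, reusing the `StreamlitWebSearch` class added later in this diff (the file name `example_config.py` is illustrative, not part of the package):

# example_config.py -- illustrative configuration module consumed by app.py.
from openai_sdk_helpers.streamlit_app.streamlit_web_search import StreamlitWebSearch

APP_CONFIG = {
    # A BaseResponse subclass; app.py instantiates it on the first message.
    "response": StreamlitWebSearch,
    "display_title": "Web Search Assistant",
    "description": "Config-driven chat for performing web searches.",
}

The shapes `APP_CONFIG` may take, and how they are validated, are defined in `configuration.py`, shown next.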
openai_sdk_helpers/streamlit_app/configuration.py (new file)
@@ -0,0 +1,324 @@
+"""Configuration loading for the example Streamlit chat app."""
+
+from __future__ import annotations
+
+import importlib.util
+from pathlib import Path
+from types import ModuleType
+from typing import Callable, Sequence, cast
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
+
+from openai_sdk_helpers.response.base import BaseResponse
+from openai_sdk_helpers.structure.base import BaseStructure
+from openai_sdk_helpers.utils import ensure_list
+
+
+class StreamlitAppConfig(BaseModel):
+    """Validated configuration for the config-driven Streamlit application.
+
+    Methods
+    -------
+    normalized_vector_stores()
+        Return configured system vector stores as a list of names.
+    create_response()
+        Instantiate the configured ``BaseResponse``.
+    load_app_config(config_path)
+        Load, validate, and return the Streamlit application configuration.
+    """
+
+    model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True)
+
+    response: BaseResponse[BaseStructure] | type[BaseResponse] | Callable | None = (
+        Field(
+            default=None,
+            description=(
+                "Configured ``BaseResponse`` subclass, instance, or callable that returns"
+                " a response instance."
+            ),
+        )
+    )
+    display_title: str = Field(
+        default="Example copilot",
+        description="Title displayed at the top of the Streamlit page.",
+    )
+    description: str | None = Field(
+        default=None,
+        description="Optional short description shown beneath the title.",
+    )
+    system_vector_store: list[str] | None = Field(
+        default=None,
+        description=(
+            "Optional vector store names to attach as system context for "
+            "file search tools."
+        ),
+    )
+    preserve_vector_stores: bool = Field(
+        default=False,
+        description="When ``True``, skip automatic vector store cleanup on close.",
+    )
+    model: str | None = Field(
+        default=None,
+        description="Optional model hint for display alongside the chat interface.",
+    )
+
+    @field_validator("system_vector_store", mode="before")
+    @classmethod
+    def validate_vector_store(
+        cls, value: Sequence[str] | str | None
+    ) -> list[str] | None:
+        """Normalize configured vector stores to a list of names.
+
+        Parameters
+        ----------
+        value : Sequence[str] | str | None
+            Raw value provided by the configuration module.
+
+        Returns
+        -------
+        list[str] | None
+            Normalized list of vector store names.
+
+        Raises
+        ------
+        ValueError
+            If any entry is not a ``str``.
+        """
+        if value is None:
+            return None
+        stores = ensure_list(value)
+        if not all(isinstance(store, str) for store in stores):
+            raise ValueError("system_vector_store values must be strings.")
+        return list(stores)
+
+    @field_validator("response")
+    @classmethod
+    def validate_response(
+        cls, value: BaseResponse[BaseStructure] | type[BaseResponse] | Callable | None
+    ) -> BaseResponse[BaseStructure] | type[BaseResponse] | Callable | None:
+        """Ensure the configuration provides a valid response source."""
+        if value is None:
+            return None
+        if isinstance(value, BaseResponse):
+            return value
+        if isinstance(value, type) and issubclass(value, BaseResponse):
+            return value
+        if callable(value):
+            return value
+        raise TypeError("response must be a BaseResponse, subclass, or callable")
+
+    def normalized_vector_stores(self) -> list[str]:
+        """Return configured system vector stores as a list.
+
+        Returns
+        -------
+        list[str]
+            Vector store names or an empty list when none are configured.
+        """
+        return list(self.system_vector_store or [])
+
+    @model_validator(mode="after")
+    def ensure_response(self) -> "StreamlitAppConfig":
+        """Validate that a response source is provided."""
+        if self.response is None:
+            raise ValueError("response must be provided.")
+        return self
+
+    def create_response(self) -> BaseResponse[BaseStructure]:
+        """Instantiate and return the configured response instance.
+
+        Returns
+        -------
+        BaseResponse[BaseStructure]
+            Active response instance.
+
+        Raises
+        ------
+        TypeError
+            If the configured ``response`` cannot produce a ``BaseResponse``.
+        """
+        return _instantiate_response(self.response)
+
+    @staticmethod
+    def load_app_config(
+        config_path: Path,
+    ) -> "StreamlitAppConfig":
+        """Load, validate, and return the Streamlit application configuration.
+
+        Parameters
+        ----------
+        config_path : Path
+            Filesystem path to the configuration module.
+
+        Returns
+        -------
+        StreamlitAppConfig
+            Validated configuration derived from ``config_path``.
+        """
+        module = _import_config_module(config_path)
+        return _extract_config(module)
+
+
+def _import_config_module(config_path: Path) -> ModuleType:
+    """Import the configuration module from ``config_path``.
+
+    Parameters
+    ----------
+    config_path : Path
+        Filesystem path pointing to the configuration module.
+
+    Returns
+    -------
+    ModuleType
+        Loaded Python module containing application configuration.
+
+    Raises
+    ------
+    FileNotFoundError
+        If ``config_path`` does not exist.
+    ImportError
+        If the module cannot be imported.
+    """
+    if not config_path.exists():
+        raise FileNotFoundError(f"Configuration file not found at '{config_path}'.")
+
+    spec = importlib.util.spec_from_file_location(config_path.stem, config_path)
+    if spec is None or spec.loader is None:
+        raise ImportError(f"Unable to load configuration module at '{config_path}'.")
+
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)
+    return module
+
+
+def _extract_config(module: ModuleType) -> StreamlitAppConfig:
+    """Extract a validated :class:`StreamlitAppConfig` from ``module``.
+
+    Parameters
+    ----------
+    module : ModuleType
+        Module loaded from the configuration path.
+
+    Returns
+    -------
+    StreamlitAppConfig
+        Parsed and validated configuration instance.
+
+    Raises
+    ------
+    ValueError
+        If ``APP_CONFIG`` is missing from the module.
+    TypeError
+        If ``APP_CONFIG`` is not a dict, callable, ``BaseResponse``, or ``StreamlitAppConfig``.
+    """
+    if not hasattr(module, "APP_CONFIG"):
+        raise ValueError("APP_CONFIG must be defined in the configuration module.")
+
+    raw_config = getattr(module, "APP_CONFIG")
+    if isinstance(raw_config, StreamlitAppConfig):
+        return raw_config
+    if isinstance(raw_config, dict):
+        return _config_from_mapping(raw_config)
+    if isinstance(raw_config, BaseResponse):
+        return StreamlitAppConfig(response=raw_config)
+    if isinstance(raw_config, type) and issubclass(raw_config, BaseResponse):
+        return StreamlitAppConfig(response=raw_config)
+    if callable(raw_config):
+        return StreamlitAppConfig(response=raw_config)
+
+    raise TypeError(
+        "APP_CONFIG must be a dict, callable, BaseResponse, or StreamlitAppConfig."
+    )
+
+
+def _instantiate_response(candidate: object) -> BaseResponse[BaseStructure]:
+    """Instantiate a :class:`BaseResponse` from the provided candidate.
+
+    Parameters
+    ----------
+    candidate : object
+        Configured response source.
+
+    Returns
+    -------
+    BaseResponse[BaseStructure]
+        Active response instance.
+
+    Raises
+    ------
+    TypeError
+        If the candidate cannot produce a ``BaseResponse`` instance.
+    """
+    if isinstance(candidate, BaseResponse):
+        return candidate
+    if isinstance(candidate, type) and issubclass(candidate, BaseResponse):
+        response_cls = cast(type[BaseResponse[BaseStructure]], candidate)
+        return response_cls()  # type: ignore[call-arg]
+    if callable(candidate):
+        response_callable = cast(Callable[[], BaseResponse[BaseStructure]], candidate)
+        response = response_callable()
+        if isinstance(response, BaseResponse):
+            return response
+    raise TypeError("response must be a BaseResponse, subclass, or callable")
+
+
+def _config_from_mapping(raw_config: dict) -> StreamlitAppConfig:
+    """Build :class:`StreamlitAppConfig` from a mapping with aliases.
+
+    The mapping may provide a ``response`` key containing a :class:`BaseResponse`
+    instance, subclass, or callable, or supply it under the alias ``build_response``.
+
+    Parameters
+    ----------
+    raw_config : dict
+        Developer-supplied mapping from the configuration module.
+
+    Returns
+    -------
+    StreamlitAppConfig
+        Validated configuration derived from ``raw_config``.
+    """
+    config_kwargs = dict(raw_config)
+    response_candidate = config_kwargs.pop("response", None)
+    if response_candidate is None:
+        response_candidate = config_kwargs.pop("build_response", None)
+    if response_candidate is not None:
+        config_kwargs["response"] = response_candidate
+
+    return StreamlitAppConfig(**config_kwargs)
+
+
+def load_app_config(
+    config_path: Path,
+) -> StreamlitAppConfig:
+    """Proxy to :meth:`StreamlitAppConfig.load_app_config` for compatibility."""
+    return StreamlitAppConfig.load_app_config(config_path=config_path)
+
+
+def _load_configuration(config_path: Path) -> StreamlitAppConfig:
+    """Load the Streamlit configuration and present user-friendly errors.
+
+    Parameters
+    ----------
+    config_path : Path
+        Filesystem location of the developer-authored configuration module.
+
+    Returns
+    -------
+    StreamlitAppConfig
+        Validated configuration object.
+    """
+    try:
+        return StreamlitAppConfig.load_app_config(config_path=config_path)
+    except Exception as exc:  # pragma: no cover - surfaced in UI
+        import streamlit as st  # type: ignore[import-not-found]
+
+        st.error(f"Configuration error: {exc}")
+        st.stop()
+        raise RuntimeError("Configuration loading halted.") from exc
+
+
+__all__ = [
+    "StreamlitAppConfig",
+    "load_app_config",
+    "_load_configuration",
+]
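`_extract_config` above accepts several `APP_CONFIG` shapes. A sketch of the equivalent forms and of loading one programmatically; `make_response` is a hypothetical factory standing in for any callable that returns a configured `BaseResponse`:

from pathlib import Path

from openai_sdk_helpers.streamlit_app.configuration import (
    StreamlitAppConfig,
    load_app_config,
)


def make_response():
    """Hypothetical factory; must return a configured BaseResponse instance."""
    ...


# Form 1: a pre-validated model instance.
APP_CONFIG = StreamlitAppConfig(response=make_response)

# Form 2: a plain mapping; "build_response" is an accepted alias for "response".
APP_CONFIG = {"build_response": make_response, "display_title": "Copilot"}

# Form 3: a bare BaseResponse instance, subclass, or callable.
APP_CONFIG = make_response

# The app later loads whichever form the module defines:
config = load_app_config(Path("example_config.py"))
response = config.create_response()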
openai_sdk_helpers/streamlit_app/streamlit_web_search.py (new file)
@@ -0,0 +1,69 @@
+"""Developer configuration for the example Streamlit chat app."""
+
+import json
+from openai_sdk_helpers.agent.web_search import WebAgentSearch
+from openai_sdk_helpers.config import OpenAISettings
+from openai_sdk_helpers.response.base import BaseResponse
+from openai_sdk_helpers.structure.web_search import WebSearchStructure
+from openai_sdk_helpers.structure.prompt import PromptStructure
+from openai_sdk_helpers.utils.core import customJSONEncoder
+
+DEFAULT_MODEL = "gpt-4o-mini"
+
+
+class StreamlitWebSearch(BaseResponse[WebSearchStructure]):
+    """Response tuned for a generic chat experience with structured output.
+
+    Methods
+    -------
+    __init__()
+        Configure a general-purpose response session using OpenAI settings.
+    """
+
+    def __init__(self) -> None:
+        settings = OpenAISettings.from_env()
+        super().__init__(
+            instructions="Perform web searches and generate reports.",
+            tools=[
+                PromptStructure.response_tool_definition(
+                    tool_name="perform_search",
+                    tool_description="Tool to perform web searches and generate reports.",
+                )
+            ],
+            schema=WebSearchStructure.response_format(),
+            output_structure=WebSearchStructure,
+            tool_handlers={"perform_search": perform_search},
+            client=settings.create_client(),
+            model=settings.default_model or DEFAULT_MODEL,
+        )
+
+
+async def perform_search(tool) -> str:
+    """Perform a web search and return structured results."""
+    structured_data = PromptStructure.from_tool_arguments(tool.arguments)
+    web_result = await WebAgentSearch(default_model=DEFAULT_MODEL).run_web_agent_async(
+        structured_data.prompt
+    )
+    return json.dumps(web_result.to_json(), cls=customJSONEncoder)
+
+
+APP_CONFIG = {
+    "response": StreamlitWebSearch,
+    "display_title": "Web Search Assistant",
+    "description": "Config-driven chat experience for performing web searches.",
+}
+
+if __name__ == "__main__":
+    web_search_instance = StreamlitWebSearch()
+    import asyncio
+
+    result = asyncio.run(
+        web_search_instance.run_async("What are the 2026 advancements in AI?")
+    )
+    if result:
+        print(web_search_instance.get_last_tool_message())
+    else:
+        print("No result returned.")
+    filepath = f"./data/{web_search_instance.name}.{web_search_instance.uuid}.json"
+    web_search_instance.save(filepath)
+    web_search_instance.close()
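Because this module defines `APP_CONFIG`, it can double as the configuration module for the packaged `app.py`. A launch sketch, assuming `streamlit` is installed and relying on Streamlit's convention that arguments after `--` are passed through to the script's `sys.argv`:

# launch_web_search.py -- illustrative launcher, not shipped with the package.
import subprocess
import sys
from pathlib import Path

import openai_sdk_helpers.streamlit_app as streamlit_app

# Resolve app.py and the bundled example config from the installed package.
pkg_dir = Path(streamlit_app.__file__).parent
subprocess.run(
    [
        sys.executable, "-m", "streamlit", "run",
        str(pkg_dir / "app.py"),
        "--",
        str(pkg_dir / "streamlit_web_search.py"),
    ],
    check=True,
)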