openai-sdk-helpers 0.4.1__py3-none-any.whl → 0.4.3__py3-none-any.whl
- openai_sdk_helpers/__init__.py +10 -36
- openai_sdk_helpers/agent/__init__.py +5 -6
- openai_sdk_helpers/agent/base.py +184 -39
- openai_sdk_helpers/agent/{config.py → configuration.py} +50 -75
- openai_sdk_helpers/agent/{coordination.py → coordinator.py} +12 -10
- openai_sdk_helpers/agent/search/__init__.py +4 -4
- openai_sdk_helpers/agent/search/base.py +16 -16
- openai_sdk_helpers/agent/search/vector.py +66 -42
- openai_sdk_helpers/agent/search/web.py +33 -29
- openai_sdk_helpers/agent/summarizer.py +6 -4
- openai_sdk_helpers/agent/translator.py +9 -5
- openai_sdk_helpers/agent/{validation.py → validator.py} +6 -4
- openai_sdk_helpers/cli.py +8 -22
- openai_sdk_helpers/environment.py +17 -0
- openai_sdk_helpers/prompt/vector_planner.jinja +7 -0
- openai_sdk_helpers/prompt/vector_search.jinja +6 -0
- openai_sdk_helpers/prompt/vector_writer.jinja +7 -0
- openai_sdk_helpers/response/__init__.py +1 -1
- openai_sdk_helpers/response/base.py +4 -4
- openai_sdk_helpers/response/{config.py → configuration.py} +9 -9
- openai_sdk_helpers/response/planner.py +12 -0
- openai_sdk_helpers/response/prompter.py +12 -0
- openai_sdk_helpers/streamlit_app/__init__.py +1 -1
- openai_sdk_helpers/streamlit_app/app.py +16 -17
- openai_sdk_helpers/streamlit_app/{config.py → configuration.py} +13 -13
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +3 -3
- openai_sdk_helpers/types.py +3 -3
- openai_sdk_helpers/utils/__init__.py +2 -6
- openai_sdk_helpers/utils/json/base_model.py +1 -1
- openai_sdk_helpers/utils/json/data_class.py +1 -1
- openai_sdk_helpers/utils/json/ref.py +3 -0
- openai_sdk_helpers/utils/registry.py +19 -15
- openai_sdk_helpers/vector_storage/storage.py +1 -1
- {openai_sdk_helpers-0.4.1.dist-info → openai_sdk_helpers-0.4.3.dist-info}/METADATA +8 -8
- {openai_sdk_helpers-0.4.1.dist-info → openai_sdk_helpers-0.4.3.dist-info}/RECORD +40 -40
- openai_sdk_helpers/agent/prompt_utils.py +0 -15
- openai_sdk_helpers/context_manager.py +0 -241
- openai_sdk_helpers/deprecation.py +0 -167
- openai_sdk_helpers/retry.py +0 -175
- openai_sdk_helpers/utils/deprecation.py +0 -167
- /openai_sdk_helpers/{logging_config.py → logging.py} +0 -0
- /openai_sdk_helpers/{config.py → settings.py} +0 -0
- {openai_sdk_helpers-0.4.1.dist-info → openai_sdk_helpers-0.4.3.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.4.1.dist-info → openai_sdk_helpers-0.4.3.dist-info}/entry_points.txt +0 -0
- {openai_sdk_helpers-0.4.1.dist-info → openai_sdk_helpers-0.4.3.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/agent/search/web.py CHANGED

@@ -3,12 +3,13 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Union
 
 from agents import custom_span, gen_trace_id, trace
 from agents.model_settings import ModelSettings
 from agents.tool import WebSearchTool
 
+from ...structure.prompt import PromptStructure
 from ...structure.web_search import (
     WebSearchItemStructure,
     WebSearchItemResultStructure,

@@ -16,7 +17,8 @@ from ...structure.web_search import (
     WebSearchPlanStructure,
     WebSearchReportStructure,
 )
-from
+from ...tools import tool_handler_factory
+from ..configuration import AgentConfiguration
 from ..utils import run_coroutine_agent_sync
 from .base import SearchPlanner, SearchToolAgent, SearchWriter
 

@@ -31,7 +33,7 @@ class WebAgentPlanner(SearchPlanner[WebSearchPlanStructure]):
     prompt_dir : Path or None, default=None
         Directory containing prompt templates.
     default_model : str or None, default=None
-        Default model identifier to use when not defined in
+        Default model identifier to use when not defined in configuration.
 
     Methods
     -------

@@ -82,7 +84,7 @@ class WebSearchToolAgent(
     prompt_dir : Path or None, default=None
         Directory containing prompt templates.
     default_model : str or None, default=None
-        Default model identifier to use when not defined in
+        Default model identifier to use when not defined in configuration.
 
     Methods
     -------

@@ -187,7 +189,7 @@ class WebAgentWriter(SearchWriter[WebSearchReportStructure]):
     prompt_dir : Path or None, default=None
         Directory containing prompt templates.
     default_model : str or None, default=None
-        Default model identifier to use when not defined in
+        Default model identifier to use when not defined in configuration.
 
     Methods
     -------

@@ -234,7 +236,7 @@ class WebAgentSearch:
     prompt_dir : Path or None, default=None
         Directory containing prompt templates.
     default_model : str or None, default=None
-        Default model identifier to use when not defined in
+        Default model identifier to use when not defined in configuration.
 
     Methods
     -------

@@ -242,6 +244,8 @@ class WebAgentSearch:
         Execute the research workflow asynchronously.
     run_agent_sync(search_query)
         Execute the research workflow synchronously.
+    as_response_tool(tool_name, tool_description)
+        Build a Responses API tool definition and handler.
     run_web_agent_async(search_query)
         Convenience asynchronous entry point for the workflow.
     run_web_agent_sync(search_query)

@@ -301,7 +305,7 @@ class WebAgentSearch:
         )
 
     def run_agent_sync(self, search_query: str) -> WebSearchStructure:
-        """
+        """Execute the entire research workflow for ``search_query`` synchronously.
 
         Parameters
         ----------

@@ -312,41 +316,41 @@
         -------
         WebSearchStructure
             Completed research output.
+
         """
         return run_coroutine_agent_sync(self.run_agent_async(search_query))
 
-
-
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str = "web_search",
+        tool_description: str = "Run the web search workflow.",
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return a Responses API tool handler and definition.
 
         Parameters
         ----------
-
-
+        tool_name : str, default="web_search"
+            Name to use for the response tool.
+        tool_description : str, default="Run the web search workflow."
+            Description for the response tool.
 
         Returns
         -------
-
-
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
         """
-        return await self.run_agent_async(search_query=search_query)
 
-
-
-    """Run :meth:`run_web_agent_async` synchronously for ``search_query``.
+        def _run_search(prompt: str) -> WebSearchStructure:
+            return run_coroutine_agent_sync(self.run_agent_async(search_query=prompt))
 
-
-
-
-
-
-    Returns
-    -------
-    WebSearchStructure
-        Completed research output.
-    """
-    return run_coroutine_agent_sync(
-        WebAgentSearch().run_web_agent_async(search_query=search_query)
+        tool_handler = {
+            tool_name: tool_handler_factory(_run_search, input_model=PromptStructure)
+        }
+        tool_definition = PromptStructure.response_tool_definition(
+            tool_name, tool_description=tool_description
         )
+        return tool_handler, tool_definition
 
 
 __all__ = [
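The new `as_response_tool` method packages the whole web-search workflow as a Responses API function tool. A minimal sketch of consuming the returned pair, assuming `WebAgentSearch()` still constructs with defaults as it did in 0.4.1; the comments are illustrative, not output captured from the package:

```python
from openai_sdk_helpers.agent.search.web import WebAgentSearch

search = WebAgentSearch()
tool_handler, tool_definition = search.as_response_tool(
    tool_name="web_search",
    tool_description="Run the web search workflow.",
)

# tool_handler maps the tool name to a callable built by tool_handler_factory;
# tool_definition is the JSON tool schema derived from PromptStructure.
print(list(tool_handler))   # ["web_search"]
print(tool_definition)      # dict suitable for a Responses API `tools` list
```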
openai_sdk_helpers/agent/summarizer.py CHANGED

@@ -8,8 +8,8 @@ from typing import Any, Dict, Optional, Type
 from ..structure import SummaryStructure
 from ..structure.base import StructureBase
 from .base import AgentBase
-from .
-from
+from .configuration import AgentConfiguration
+from ..environment import DEFAULT_PROMPT_DIR
 
 
 class SummarizerAgent(AgentBase):

@@ -84,7 +84,7 @@ class SummarizerAgent(AgentBase):
         --------
         >>> summarizer = SummarizerAgent(default_model="gpt-4o-mini")
         """
-
+        configuration = AgentConfiguration(
             name="summarizer",
             instructions="Agent instructions",
             description="Summarize passages into concise findings.",

@@ -92,7 +92,9 @@ class SummarizerAgent(AgentBase):
         )
         prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
         super().__init__(
-
+            configuration=configuration,
+            prompt_dir=prompt_directory,
+            default_model=default_model,
         )
 
     async def run_agent(
openai_sdk_helpers/agent/translator.py CHANGED

@@ -5,11 +5,13 @@ from __future__ import annotations
 from pathlib import Path
 from typing import Any, Dict, Optional
 
-
-from .config import AgentConfiguration
-from .prompt_utils import DEFAULT_PROMPT_DIR
+
 from ..structure import TranslationStructure
 from ..structure.base import StructureBase
+from ..environment import DEFAULT_PROMPT_DIR
+
+from .base import AgentBase
+from .configuration import AgentConfiguration
 
 
 class TranslatorAgent(AgentBase):

@@ -82,7 +84,7 @@ class TranslatorAgent(AgentBase):
         --------
         >>> translator = TranslatorAgent(default_model="gpt-4o-mini")
         """
-
+        configuration = AgentConfiguration(
             name="translator",
             instructions="Agent instructions",
             description="Translate text into the requested language.",

@@ -90,7 +92,9 @@ class TranslatorAgent(AgentBase):
         )
         prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
         super().__init__(
-
+            configuration=configuration,
+            prompt_dir=prompt_directory,
+            default_model=default_model,
         )
 
     async def run_agent(
openai_sdk_helpers/agent/validator.py CHANGED (renamed from validation.py)

@@ -5,10 +5,10 @@ from __future__ import annotations
 from pathlib import Path
 from typing import Any, Dict, Optional
 
+from ..environment import DEFAULT_PROMPT_DIR
 from ..structure.validation import ValidationResultStructure
 from .base import AgentBase
-from .
-from .prompt_utils import DEFAULT_PROMPT_DIR
+from .configuration import AgentConfiguration
 
 
 class ValidatorAgent(AgentBase):

@@ -81,7 +81,7 @@ class ValidatorAgent(AgentBase):
         --------
         >>> validator = ValidatorAgent(default_model="gpt-4o-mini")
         """
-
+        configuration = AgentConfiguration(
             name="validator",
             instructions="Agent instructions",
             description="Validate user input and agent output against guardrails.",

@@ -89,7 +89,9 @@ class ValidatorAgent(AgentBase):
         )
         prompt_directory = prompt_dir or DEFAULT_PROMPT_DIR
         super().__init__(
-
+            configuration=configuration,
+            prompt_dir=prompt_directory,
+            default_model=default_model,
         )
 
     async def run_agent(
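The summarizer, translator, and validator agents now build an `AgentConfiguration` and hand it to `AgentBase.__init__` together with `prompt_dir` and `default_model`. A hedged sketch of a custom agent following the same constructor pattern; the class below is hypothetical and only uses the arguments visible in this diff (`AgentConfiguration` may accept more fields):

```python
from pathlib import Path
from typing import Optional

from openai_sdk_helpers.agent.base import AgentBase
from openai_sdk_helpers.agent.configuration import AgentConfiguration
from openai_sdk_helpers.environment import DEFAULT_PROMPT_DIR


class EchoAgent(AgentBase):
    """Hypothetical example agent, not part of the package."""

    def __init__(
        self,
        prompt_dir: Optional[Path] = None,
        default_model: Optional[str] = None,
    ) -> None:
        # Mirror the pattern used by SummarizerAgent / TranslatorAgent / ValidatorAgent.
        configuration = AgentConfiguration(
            name="echo",
            instructions="Agent instructions",
            description="Echo the input back to the caller.",
        )
        super().__init__(
            configuration=configuration,
            prompt_dir=prompt_dir or DEFAULT_PROMPT_DIR,
            default_model=default_model,
        )
```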
openai_sdk_helpers/cli.py CHANGED

@@ -18,17 +18,8 @@ registry inspect
 from __future__ import annotations
 
 import argparse
-import json
 import sys
 from pathlib import Path
-from typing import Any
-
-try:
-    import openai_sdk_helpers
-
-    __version__ = getattr(openai_sdk_helpers, "__version__", "unknown")
-except ImportError:
-    __version__ = "unknown"
 
 
 def cmd_agent_test(args: argparse.Namespace) -> int:

@@ -159,8 +150,8 @@ def cmd_registry_list(args: argparse.Namespace) -> int:
 
     print("Registered configurations:")
     for name in sorted(names):
-
-        tools_count = len(
+        configuration = registry.get(name)
+        tools_count = len(configuration.tools) if configuration.tools else 0
         print(f"  - {name} ({tools_count} tools)")
 
     return 0

@@ -199,7 +190,7 @@ def cmd_registry_inspect(args: argparse.Namespace) -> int:
     registry = get_default_registry()
 
     try:
-
+        configuration = registry.get(args.config_name)
     except KeyError:
         print(f"Error: Configuration '{args.config_name}' not found", file=sys.stderr)
         print("\nAvailable configurations:")

@@ -207,17 +198,17 @@ def cmd_registry_inspect(args: argparse.Namespace) -> int:
             print(f"  - {name}")
         return 1
 
-    print(f"Configuration: {
-    instructions_str = str(
+    print(f"Configuration: {configuration.name}")
+    instructions_str = str(configuration.instructions)
     instructions_preview = (
         instructions_str[:100] if len(instructions_str) > 100 else instructions_str
     )
     print(f"Instructions: {instructions_preview}...")
-    print(f"Tools: {len(
+    print(f"Tools: {len(configuration.tools) if configuration.tools else 0}")
 
-    if
+    if configuration.tools:
         print("\nTool names:")
-        for tool in
+        for tool in configuration.tools:
             tool_name = tool.get("function", {}).get("name", "unknown")
             print(f"  - {tool_name}")
 

@@ -245,11 +236,6 @@ def main(argv: list[str] | None = None) -> int:
         prog="openai-helpers",
         description="OpenAI SDK Helpers CLI",
     )
-    parser.add_argument(
-        "--version",
-        action="version",
-        version=f"openai-sdk-helpers {__version__}",
-    )
 
     subparsers = parser.add_subparsers(dest="command", help="Commands")
 
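With the `--version` flag and the module-level version lookup removed, the CLI module imports nothing extra at load time, and `main` still accepts an explicit `argv` list. A sketch of driving the registry commands programmatically; the subcommand spelling is inferred from the `cmd_registry_list` / `cmd_registry_inspect` handlers and the module docstring, and the configuration name is illustrative:

```python
from openai_sdk_helpers.cli import main

# Assumed spelling: "registry list" / "registry inspect <name>".
exit_code = main(["registry", "list"])
if exit_code == 0:
    main(["registry", "inspect", "planner"])  # "planner" is an example name
```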
openai_sdk_helpers/environment.py CHANGED

@@ -19,9 +19,12 @@ get_data_path(name)
 from __future__ import annotations
 
 from pathlib import Path
+from dotenv import load_dotenv
+
 
 from openai_sdk_helpers.utils import ensure_directory
 
+load_dotenv()
 DATETIME_FMT = "%Y%m%d_%H%M%S"
 DEFAULT_MODEL = "gpt-4o-mini"
 

@@ -54,3 +57,17 @@ def get_data_path(name: str) -> Path:
     base = Path(__file__).parent.parent.parent / "data"
     path = base / name
     return ensure_directory(path)
+
+
+def get_package_path() -> Path:
+    """Return the root path of the openai-sdk-helpers package.
+
+    Returns
+    -------
+    Path
+        Root directory path of the openai-sdk-helpers package.
+    """
+    return Path(__file__).parent
+
+
+DEFAULT_PROMPT_DIR = get_package_path() / "prompt"
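`DEFAULT_PROMPT_DIR` now lives in `environment.py` (it was previously imported from the removed `agent/prompt_utils.py`) and resolves relative to the installed package, so the bundled Jinja prompts ship with the wheel:

```python
from openai_sdk_helpers.environment import DEFAULT_PROMPT_DIR, get_package_path

print(get_package_path())                           # .../site-packages/openai_sdk_helpers
print(DEFAULT_PROMPT_DIR / "vector_planner.jinja")  # one of the bundled prompt templates
```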
openai_sdk_helpers/prompt/vector_planner.jinja ADDED

@@ -0,0 +1,7 @@
+You are a vector search planner.
+
+Instructions:
+- Break the user query into 1-5 focused vector search queries.
+- Prefer short, keyword-rich queries optimized for semantic retrieval.
+- Avoid web-search phrasing like "site:" or "latest news."
+- Provide a clear reason for each query.
openai_sdk_helpers/prompt/vector_writer.jinja ADDED

@@ -0,0 +1,7 @@
+You are a vector search report writer.
+
+Instructions:
+- Use only the provided vector search results.
+- Summarize findings without referencing the public web.
+- If sources are requested, describe them as vector store entries (e.g., file names or "vector store chunk").
+- Keep the report focused on information grounded in the retrieved texts.
openai_sdk_helpers/response/__init__.py CHANGED

@@ -35,7 +35,7 @@ process_files
 from __future__ import annotations
 
 from .base import ResponseBase
-from .
+from .configuration import ResponseConfiguration, ResponseRegistry, get_default_registry
 from .files import process_files
 from .messages import ResponseMessage, ResponseMessages
 from .runner import run_async, run_streamed, run_sync
openai_sdk_helpers/response/base.py CHANGED

@@ -43,7 +43,7 @@ from openai.types.responses.response_input_text_param import ResponseInputTextPa
 from openai.types.responses.response_output_message import ResponseOutputMessage
 
 from .messages import ResponseMessage, ResponseMessages
-from ..
+from ..settings import OpenAISettings
 from ..structure import StructureBase
 from ..types import OpenAIClient
 from ..utils import (

@@ -55,7 +55,7 @@ from ..utils import (
 )
 
 if TYPE_CHECKING: # pragma: no cover - only for typing hints
-    from openai_sdk_helpers.streamlit_app.
+    from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
 
 T = TypeVar("T", bound=StructureBase)
 ToolHandler = Callable[[ResponseFunctionToolCall], str | Any]

@@ -814,14 +814,14 @@ class ResponseBase(Generic[T]):
 
         Examples
         --------
-        >>>
+        >>> configuration = MyResponse.build_streamlit_config(
         ...     display_title="My Assistant",
         ...     description="A helpful AI assistant",
         ...     system_vector_store=["docs", "kb"],
         ...     model="gpt-4"
         ... )
         """
-        from openai_sdk_helpers.streamlit_app.
+        from openai_sdk_helpers.streamlit_app.configuration import StreamlitAppConfig
 
         normalized_stores = None
         if system_vector_store is not None:
openai_sdk_helpers/response/configuration.py CHANGED (renamed from config.py)

@@ -6,9 +6,9 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Generic, Optional, Sequence, Type, TypeVar
 
-from ..
+from ..settings import OpenAISettings
 from ..structure.base import StructureBase
-from
+from .base import ResponseBase, ToolHandler
 from ..utils.json.data_class import DataclassJSONSerializable
 from ..utils.registry import RegistryBase
 from ..utils.instructions import resolve_instructions_from_path

@@ -26,14 +26,14 @@ class ResponseRegistry(RegistryBase["ResponseConfiguration"]):
     Examples
     --------
     >>> registry = ResponseRegistry()
-    >>>
+    >>> configuration = ResponseConfiguration(
     ...     name="test",
     ...     instructions="Test instructions",
     ...     tools=None,
     ...     input_structure=None,
     ...     output_structure=None
     ... )
-    >>> registry.register(
+    >>> registry.register(configuration)
     >>> retrieved = registry.get("test")
     >>> retrieved.name
     'test'

@@ -53,8 +53,8 @@ def get_default_registry() -> ResponseRegistry:
     Examples
     --------
     >>> registry = get_default_registry()
-    >>>
-    >>> registry.register(
+    >>> configuration = ResponseConfiguration(...)
+    >>> registry.register(configuration)
     """
     return _default_registry
 

@@ -119,13 +119,13 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
 
     Examples
     --------
-    >>>
+    >>> configuration = Configuration(
     ...     name="targeting_to_plan",
     ...     tools=None,
     ...     input_structure=PromptStructure,
     ...     output_structure=WebSearchStructure,
     ... )
-    >>>
+    >>> configuration.name
     'prompt_to_websearch'
     """
 

@@ -135,7 +135,7 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
     input_structure: Optional[Type[TIn]]
     output_structure: Optional[Type[TOut]]
     system_vector_store: Optional[list[str]] = None
-    add_output_instructions: bool =
+    add_output_instructions: bool = False
     add_web_search_tool: bool = False
 
     def __post_init__(self) -> None:
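Note that `add_output_instructions` is now an explicit opt-in (it defaults to `False` in 0.4.3). A minimal sketch using only the fields visible in this diff and in the class docstring:

```python
from openai_sdk_helpers.response.configuration import ResponseConfiguration

configuration = ResponseConfiguration(
    name="demo",
    instructions="Answer briefly.",
    tools=None,
    input_structure=None,
    output_structure=None,
    add_output_instructions=True,  # opt in explicitly; the field defaults to False
)
```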
openai_sdk_helpers/response/planner.py ADDED

@@ -0,0 +1,12 @@
+"""Planner response configuration."""
+
+from ..structure.plan.plan import PlanStructure
+from .configuration import ResponseConfiguration
+
+PLANNER = ResponseConfiguration(
+    name="planner",
+    instructions="Generates structured prompts based on user input.",
+    tools=None,
+    input_structure=None,
+    output_structure=PlanStructure,
+)
openai_sdk_helpers/response/prompter.py ADDED

@@ -0,0 +1,12 @@
+"""Prompter response configuration."""
+
+from .configuration import ResponseConfiguration
+from ..structure.prompt import PromptStructure
+
+PROMPTER = ResponseConfiguration(
+    name="prompter",
+    instructions="Generates structured prompts based on user input.",
+    tools=None,
+    input_structure=None,
+    output_structure=PromptStructure,
+)
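The new `planner.py` and `prompter.py` modules expose ready-made `ResponseConfiguration` instances. Whether the package registers them automatically is not shown in this diff; a sketch of registering them by hand, using only the calls documented in `configuration.py`:

```python
from openai_sdk_helpers.response.configuration import get_default_registry
from openai_sdk_helpers.response.planner import PLANNER
from openai_sdk_helpers.response.prompter import PROMPTER

registry = get_default_registry()
registry.register(PLANNER)
registry.register(PROMPTER)
print(registry.get("prompter").name)  # 'prompter'
```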
openai_sdk_helpers/streamlit_app/app.py CHANGED

@@ -28,7 +28,6 @@ from openai_sdk_helpers.utils import (
     coerce_jsonable,
     customJSONEncoder,
     ensure_list,
-    log,
 )
 
 # Supported file extensions for OpenAI Assistants file search and vision

@@ -226,7 +225,7 @@ def _build_raw_output(result: Any, response: ResponseBase[Any]) -> dict[str, Any
     }
 
 
-def _get_response_instance(
+def _get_response_instance(configuration: StreamlitAppConfig) -> ResponseBase[Any]:
     """Instantiate and cache the configured ResponseBase.
 
     Creates a new response instance from the configuration if not already

@@ -235,7 +234,7 @@ def _get_response_instance(config: StreamlitAppConfig) -> ResponseBase[Any]:
 
     Parameters
     ----------
-
+    configuration : StreamlitAppConfig
         Loaded configuration with response handler definition.
 
     Returns

@@ -258,13 +257,13 @@ def _get_response_instance(config: StreamlitAppConfig) -> ResponseBase[Any]:
     if isinstance(cached, ResponseBase):
         return cached
 
-    response =
+    response = configuration.create_response()
 
-    if
+    if configuration.preserve_vector_stores:
         setattr(response, "_cleanup_system_vector_storage", False)
         setattr(response, "_cleanup_user_vector_storage", False)
 
-    vector_stores =
+    vector_stores = configuration.normalized_vector_stores()
     if vector_stores:
         attach_vector_store(response=response, vector_stores=vector_stores)
 

@@ -357,7 +356,7 @@ def _render_chat_history() -> None:
 
 
 def _handle_user_message(
     prompt: str,
-
+    configuration: StreamlitAppConfig,
     attachment_paths: list[str] | None = None,
     attachment_names: list[str] | None = None,
 ) -> None:

@@ -371,7 +370,7 @@ def _handle_user_message(
     ----------
     prompt : str
         User-entered text to send to the assistant.
-
+    configuration : StreamlitAppConfig
         Loaded configuration with response handler definition.
     attachment_paths : list[str] or None, default None
         Optional list of file paths to attach to the message.

@@ -395,7 +394,7 @@ def _handle_user_message(
         {"role": "user", "content": prompt, "attachments": display_names}
     )
     try:
-        response = _get_response_instance(
+        response = _get_response_instance(configuration)
     except Exception as exc: # pragma: no cover - surfaced in UI
         st.error(f"Failed to start response session: {exc}")
         return

@@ -442,15 +441,15 @@ def main(config_path: Path) -> None:
     >>> from pathlib import Path
     >>> main(Path("./my_config.py"))
     """
-
-    st.set_page_config(page_title=
+    configuration = _load_configuration(config_path)
+    st.set_page_config(page_title=configuration.display_title, layout="wide")
     _init_session_state()
 
-    st.title(
-    if
-        st.caption(
-    if
-        st.caption(f"Model: {
+    st.title(configuration.display_title)
+    if configuration.description:
+        st.caption(configuration.description)
+    if configuration.model:
+        st.caption(f"Model: {configuration.model}")
 
     close_col, _ = st.columns([1, 5])
     with close_col:

@@ -514,7 +513,7 @@ def main(config_path: Path) -> None:
         st.session_state["attachment_names"] = []
         _handle_user_message(
             prompt,
-
+            configuration,
             attachment_paths or None,
             attachment_display_names or None,
         )
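The Streamlit entry point now loads the `StreamlitAppConfig` once and threads it through `_handle_user_message` and `_get_response_instance` instead of resolving it per call. The launch pattern from the `main` docstring is unchanged; a thin wrapper script passed to `streamlit run` might look like this (the config path is illustrative):

```python
from pathlib import Path

from openai_sdk_helpers.streamlit_app.app import main

main(Path("./my_config.py"))
```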