openai_sdk_helpers-0.4.0-py3-none-any.whl → openai_sdk_helpers-0.4.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/__init__.py +6 -2
- openai_sdk_helpers/agent/__init__.py +4 -4
- openai_sdk_helpers/agent/base.py +168 -6
- openai_sdk_helpers/agent/config.py +47 -72
- openai_sdk_helpers/agent/search/__init__.py +4 -4
- openai_sdk_helpers/agent/search/vector.py +41 -29
- openai_sdk_helpers/agent/search/web.py +28 -24
- openai_sdk_helpers/environment.py +22 -0
- openai_sdk_helpers/response/config.py +22 -9
- openai_sdk_helpers/response/planner.py +12 -0
- openai_sdk_helpers/response/prompter.py +12 -0
- openai_sdk_helpers/streamlit_app/__init__.py +4 -4
- openai_sdk_helpers/streamlit_app/config.py +44 -33
- openai_sdk_helpers/streamlit_app/streamlit_web_search.py +1 -1
- openai_sdk_helpers/utils/__init__.py +2 -2
- openai_sdk_helpers/utils/json/ref.py +3 -0
- openai_sdk_helpers/utils/registry.py +31 -4
- {openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/METADATA +1 -1
- {openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/RECORD +22 -20
- {openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/entry_points.txt +0 -0
- {openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/__init__.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+from .environment import get_data_path, get_model
 from .utils.async_utils import run_coroutine_thread_safe, run_coroutine_with_fallback
 from .context_manager import (
     AsyncManagedResource,
@@ -61,7 +62,7 @@ from .agent import (
     SummarizerAgent,
     TranslatorAgent,
     ValidatorAgent,
-    VectorSearch,
+    VectorAgentSearch,
     WebAgentSearch,
 )
 from .response import (
@@ -103,6 +104,9 @@ from .types import (
 )
 
 __all__ = [
+    # Environment utilities
+    "get_data_path",
+    "get_model",
     # Async utilities
     "run_coroutine_thread_safe",
     "run_coroutine_with_fallback",
@@ -156,7 +160,7 @@ __all__ = [
     "SummarizerAgent",
     "TranslatorAgent",
     "ValidatorAgent",
-    "VectorSearch",
+    "VectorAgentSearch",
     "WebAgentSearch",
     "ExtendedSummaryStructure",
     "WebSearchStructure",
openai_sdk_helpers/agent/__init__.py
CHANGED

@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from .base import AgentBase
-from .config import AgentConfiguration, AgentConfigurationRegistry, get_default_registry
+from .config import AgentConfiguration, AgentRegistry, get_default_registry
 from ..structure.plan.enum import AgentEnum
 from .coordination import CoordinatorAgent
 from .runner import run_sync, run_async, run_streamed
@@ -12,13 +12,13 @@ from .summarizer import SummarizerAgent
 from .translator import TranslatorAgent
 from .validation import ValidatorAgent
 from .utils import run_coroutine_agent_sync
-from .search.vector import VectorSearch
+from .search.vector import VectorAgentSearch
 from .search.web import WebAgentSearch
 
 __all__ = [
     "AgentBase",
     "AgentConfiguration",
-    "AgentConfigurationRegistry",
+    "AgentRegistry",
     "get_default_registry",
     "AgentEnum",
     "CoordinatorAgent",
@@ -32,6 +32,6 @@ __all__ = [
     "SummarizerAgent",
     "TranslatorAgent",
     "ValidatorAgent",
-    "VectorSearch",
+    "VectorAgentSearch",
     "WebAgentSearch",
 ]
openai_sdk_helpers/agent/base.py
CHANGED
@@ -2,8 +2,9 @@
 
 from __future__ import annotations
 
+import json
 from pathlib import Path
-from typing import Any, Dict, Optional, Protocol, cast
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Protocol, cast
 import uuid
 
 from agents import (
@@ -21,16 +22,23 @@ from jinja2 import Template
 
 from ..utils.json.data_class import DataclassJSONSerializable
 from ..structure.base import StructureBase
+from ..structure.prompt import PromptStructure
 
 from ..utils import (
     check_filepath,
     log,
 )
 
+from ..tools import tool_handler_factory
+
 from .runner import run_async, run_streamed, run_sync
 
+if TYPE_CHECKING:
+    from ..config import OpenAISettings
+    from ..response.base import ResponseBase, ToolHandler
+
 
-class
+class AgentConfigurationProtocol(Protocol):
     """Protocol describing the configuration attributes for AgentBase."""
 
     @property
@@ -181,6 +189,10 @@ class AgentBase(DataclassJSONSerializable):
         Return a streaming result for the agent execution.
     as_tool()
         Return the agent as a callable tool.
+    as_response_tool()
+        Return response tool handler and definition for Responses API use.
+    build_response(openai_settings, data_path=None, tool_handlers=None, system_vector_store=None)
+        Build a ResponseBase instance based on this agent.
     close()
         Clean up agent resources (can be overridden by subclasses).
     """
@@ -188,7 +200,7 @@ class AgentBase(DataclassJSONSerializable):
     def __init__(
         self,
         *,
-        config:
+        config: AgentConfigurationProtocol,
         run_context_wrapper: Optional[RunContextWrapper[Dict[str, Any]]] = None,
         data_path: Path | str | None = None,
         prompt_dir: Optional[Path] = None,
@@ -198,7 +210,7 @@ class AgentBase(DataclassJSONSerializable):
 
         Parameters
         ----------
-        config :
+        config : AgentConfigurationProtocol
            Configuration describing this agent.
        run_context_wrapper : RunContextWrapper or None, default=None
            Optional wrapper providing runtime context for prompt rendering.
@@ -428,7 +440,7 @@
             "model": self.model,
         }
         if self._output_structure:
-            agent_config["
+            agent_config["output_type"] = self._output_structure
         if self._tools:
             agent_config["tools"] = self._tools
         if self._model_settings:
@@ -575,6 +587,156 @@
         )
         return tool_obj
 
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str | None = None,
+        tool_description: str | None = None,
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return response tool handler and definition for Responses API use.
+
+        The returned handler serializes tool output as JSON using
+        ``tool_handler_factory`` so downstream response flows can rely on a
+        consistent payload format.
+
+        Parameters
+        ----------
+        tool_name : str or None, default=None
+            Optional override for the tool name. When None, uses the agent name.
+        tool_description : str or None, default=None
+            Optional override for the tool description. When None, uses the
+            agent description.
+
+        Returns
+        -------
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
+
+        Examples
+        --------
+        >>> tool_handler, tool_definition = agent.as_response_tool()
+        >>> response = ResponseBase(
+        ...     name="agent_tool",
+        ...     instructions="Use the agent tool when needed.",
+        ...     tools=[tool_definition],
+        ...     output_structure=None,
+        ...     tool_handlers=tool_handler,
+        ...     openai_settings=settings,
+        ... )
+        >>> response.run_sync("Invoke the agent tool")  # doctest: +SKIP
+        """
+
+        def _run_agent(**kwargs: Any) -> Any:
+            prompt = kwargs.get("prompt")
+            if prompt is None:
+                if len(kwargs) == 1:
+                    prompt = next(iter(kwargs.values()))
+                else:
+                    prompt = json.dumps(kwargs)
+            return self.run_sync(str(prompt))
+
+        name = tool_name or self.name
+        description = tool_description or self.description
+        input_model = self._input_structure or PromptStructure
+        tool_handler = {name: tool_handler_factory(_run_agent, input_model=input_model)}
+        tool_definition = {
+            "type": "function",
+            "name": name,
+            "description": description,
+            "strict": True,
+            "additionalProperties": False,
+            "parameters": self._build_response_parameters(),
+        }
+        return tool_handler, tool_definition
+
+    def build_response(
+        self,
+        *,
+        openai_settings: OpenAISettings,
+        data_path: Path | str | None = None,
+        tool_handlers: dict[str, ToolHandler] | None = None,
+        system_vector_store: list[str] | None = None,
+    ) -> ResponseBase[StructureBase]:
+        """Build a ResponseBase instance from this agent configuration.
+
+        Parameters
+        ----------
+        openai_settings : OpenAISettings
+            Authentication and model settings applied to the generated response.
+        data_path : Path, str, or None, default None
+            Optional path for storing response artifacts. When None, the
+            response uses the default data directory.
+        tool_handlers : dict[str, ToolHandler] or None, default None
+            Optional mapping of tool names to handler callables.
+        system_vector_store : list[str] or None, default None
+            Optional list of vector store names to attach as system context.
+
+        Returns
+        -------
+        ResponseBase[StructureBase]
+            ResponseBase instance configured with this agent's settings.
+
+        Examples
+        --------
+        >>> from openai_sdk_helpers import OpenAISettings
+        >>> response = agent.build_response(openai_settings=OpenAISettings.from_env())
+        """
+        from ..response.base import ResponseBase, ToolHandler
+        from ..config import OpenAISettings
+
+        if not isinstance(openai_settings, OpenAISettings):
+            raise TypeError("openai_settings must be an OpenAISettings instance")
+
+        tools = self._normalize_response_tools(self.tools)
+
+        return ResponseBase(
+            name=self.name,
+            instructions=self.instructions_text,
+            tools=tools,
+            output_structure=self.output_structure,
+            system_vector_store=system_vector_store,
+            data_path=data_path,
+            tool_handlers=tool_handlers,
+            openai_settings=openai_settings,
+        )
+
+    def _build_response_parameters(self) -> dict[str, Any]:
+        """Build the Responses API parameter schema for this agent tool.
+
+        Returns
+        -------
+        dict[str, Any]
+            JSON schema describing tool input parameters.
+        """
+        if self._input_structure is not None:
+            return self._input_structure.get_schema()
+        return {
+            "type": "object",
+            "properties": {
+                "prompt": {"type": "string", "description": "Prompt text to run."}
+            },
+            "required": ["prompt"],
+            "additionalProperties": False,
+        }
+
+    @staticmethod
+    def _normalize_response_tools(tools: Optional[list]) -> Optional[list]:
+        """Normalize tool definitions for the Responses API."""
+        if not tools:
+            return tools
+
+        normalized: list[Any] = []
+        for tool in tools:
+            if hasattr(tool, "to_dict") and callable(tool.to_dict):
+                normalized.append(tool.to_dict())
+            elif hasattr(tool, "to_openai_tool") and callable(tool.to_openai_tool):
+                normalized.append(tool.to_openai_tool())
+            elif hasattr(tool, "schema"):
+                normalized.append(tool.schema)
+            else:
+                normalized.append(tool)
+        return normalized
+
     def __enter__(self) -> AgentBase:
         """Enter the context manager for resource management.
 
@@ -651,4 +813,4 @@
         log(f"Saved messages to {target}")
 
 
-__all__ = ["
+__all__ = ["AgentConfigurationProtocol", "AgentBase"]
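
The two new public methods above let any AgentBase double as a Responses API tool or be converted into a full ResponseBase. A minimal usage sketch, following the docstring examples in this diff; it assumes `agent` is an already-constructed AgentBase subclass and that OpenAISettings.from_env() picks credentials up from the environment:

from openai_sdk_helpers import OpenAISettings
from openai_sdk_helpers.response.base import ResponseBase

settings = OpenAISettings.from_env()

# Expose the agent as a Responses API tool (handler mapping + JSON tool definition).
tool_handler, tool_definition = agent.as_response_tool()
response = ResponseBase(
    name="agent_tool",
    instructions="Use the agent tool when needed.",
    tools=[tool_definition],
    output_structure=None,
    tool_handlers=tool_handler,
    openai_settings=settings,
)
result = response.run_sync("Invoke the agent tool")

# Or build a ResponseBase directly from the agent's own configuration.
direct_response = agent.build_response(openai_settings=settings)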
openai_sdk_helpers/agent/config.py
CHANGED

@@ -10,20 +10,20 @@ from agents import Agent, Handoff, InputGuardrail, OutputGuardrail, Session
 from agents.model_settings import ModelSettings
 
 from ..utils.json.data_class import DataclassJSONSerializable
-from ..utils.registry import BaseRegistry
+from ..utils.registry import RegistryBase
 from ..utils.instructions import resolve_instructions_from_path
 from ..structure.base import StructureBase
 
 
-class AgentConfigurationRegistry(BaseRegistry["AgentConfiguration"]):
+class AgentRegistry(RegistryBase["AgentConfiguration"]):
     """Registry for managing AgentConfiguration instances.
 
-    Inherits from BaseRegistry to provide centralized storage and retrieval
+    Inherits from RegistryBase to provide centralized storage and retrieval
     of agent configurations, enabling reusable agent specs across the application.
 
     Examples
     --------
-    >>> registry = AgentConfigurationRegistry()
+    >>> registry = AgentRegistry()
     >>> config = AgentConfiguration(
     ...     name="test_agent",
     ...     model="gpt-4o-mini",
@@ -65,7 +65,7 @@ class AgentConfigurationRegistry(BaseRegistry["AgentConfiguration"]):
 
     Examples
     --------
-    >>> registry = AgentConfigurationRegistry()
+    >>> registry = AgentRegistry()
     >>> count = registry.load_from_directory("./agents")
     >>> print(f"Loaded {count} configurations")
     """
@@ -74,12 +74,12 @@
         return super().load_from_directory(path, config_class=config_class)
 
 
-def get_default_registry() -> AgentConfigurationRegistry:
+def get_default_registry() -> AgentRegistry:
     """Return the global default registry instance.
 
     Returns
     -------
-    AgentConfigurationRegistry
+    AgentRegistry
         Singleton registry for application-wide configuration storage.
 
     Examples
@@ -187,9 +187,8 @@ class AgentConfiguration(DataclassJSONSerializable):
     input_guardrails: Optional[list[InputGuardrail]] = None
     output_guardrails: Optional[list[OutputGuardrail]] = None
     session: Optional[Session] = None
-
-
-    )
+    add_output_instructions: bool = False
+    add_web_search_tool: bool = False
 
     def __post_init__(self) -> None:
         """Validate configuration invariants after initialization.
@@ -258,11 +257,16 @@
         str
             Plain-text instructions, loading template files when necessary.
         """
-
-
-
-
-
+        resolved_instructions: str = resolve_instructions_from_path(self.instructions)
+        output_instructions = ""
+        if self.output_structure is not None and self.add_output_instructions:
+            output_instructions = self.output_structure.get_prompt(
+                add_enum_values=False
+            )
+        if output_instructions:
+            return f"{resolved_instructions}\n{output_instructions}"
+
+        return resolved_instructions
 
     def _resolve_instructions(self) -> str:
         """Resolve instructions from string or file path."""
@@ -361,69 +365,40 @@
 
         return replace(self, **changes)
 
-    def to_json(self) -> dict[str, Any]:
-        """
-
-        Returns
-        -------
-        dict[str, Any]
-            Serialized configuration data without cached fields.
-        """
-        data = DataclassJSONSerializable.to_json(self)
-        data.pop("_instructions_cache", None)
-        return data
-
-    @classmethod
-    def from_json(cls, data: dict[str, Any]) -> AgentConfiguration:
-        """Create an AgentConfiguration from JSON data.
+    def to_response_config(self) -> Any:
+        """Convert this AgentConfiguration to a ResponseConfiguration.
 
-
-        paths back to Path objects for proper file loading.
-
-        Parameters
-        ----------
-        data : dict[str, Any]
-            Dictionary containing the configuration data.
+        This is a convenience method for creating a ResponseConfiguration
+        instance using the relevant fields from this agent configuration.
 
         Returns
         -------
-
-            New configuration instance.
-
-
-
-
-
-
+        ResponseConfiguration
+            New response configuration instance.
+
+        Examples
+        --------
+        >>> agent_config = AgentConfiguration(
+        ...     name="responder",
+        ...     model="gpt-4o-mini",
+        ...     instructions="Respond to user queries"
+        ... )
+        >>> response_config = agent_config.to_response_config()
+        >>> response_config.name
+        'responder'
         """
-
-
-
-
-
-
-
-
-        # Check if it looks like a file path and the file exists
-        # This preserves the intended behavior for file-based instructions
-        try:
-            potential_path = Path(instructions_value)
-            # Only convert to Path if it's an existing file
-            # This way, plain text instructions stay as strings
-            if potential_path.exists() and potential_path.is_file():
-                data["instructions"] = potential_path
-        except (OSError, ValueError):
-            # If path parsing fails, keep it as a string (likely plain text)
-            pass
-
-        # Use the parent class method for the rest
-        return super(AgentConfiguration, cls).from_json(data)
+        from ..response.config import ResponseConfiguration
+
+        return ResponseConfiguration(
+            name=self.name,
+            instructions=self.instructions,
+            input_structure=self.input_structure,
+            output_structure=self.output_structure,
+            tools=self.tools,
+        )
 
 
 # Global default registry instance
-_default_registry = AgentConfigurationRegistry()
+_default_registry = AgentRegistry()
 
-__all__ = ["AgentConfiguration", "AgentConfigurationRegistry", "get_default_registry"]
+__all__ = ["AgentConfiguration", "AgentRegistry", "get_default_registry"]
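
AgentConfigurationRegistry becomes AgentRegistry, and AgentConfiguration gains to_response_config(). A short sketch based on the Examples sections above; the register()/get() calls are assumed from the registry docstrings elsewhere in this diff rather than shown here:

from openai_sdk_helpers.agent import AgentConfiguration, AgentRegistry, get_default_registry

config = AgentConfiguration(
    name="responder",
    model="gpt-4o-mini",
    instructions="Respond to user queries",
)

registry: AgentRegistry = get_default_registry()  # module-level AgentRegistry() singleton
registry.register(config)                         # assumed RegistryBase API
print(registry.get("responder"))                  # assumed RegistryBase API

# New in 0.4.2: derive a ResponseConfiguration from the same fields.
response_config = config.to_response_config()
assert response_config.name == "responder"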
openai_sdk_helpers/agent/search/__init__.py
CHANGED

@@ -10,10 +10,10 @@ from .web import (
 )
 from .vector import (
     MAX_CONCURRENT_SEARCHES as VECTOR_MAX_CONCURRENT_SEARCHES,
-
+    VectorAgentPlanner,
     VectorSearchTool,
     VectorSearchWriter,
-    VectorSearch,
+    VectorAgentSearch,
 )
 
 __all__ = [
@@ -26,8 +26,8 @@ __all__ = [
     "WebAgentWriter",
     "WebAgentSearch",
     "VECTOR_MAX_CONCURRENT_SEARCHES",
-    "
+    "VectorAgentPlanner",
     "VectorSearchTool",
     "VectorSearchWriter",
-    "VectorSearch",
+    "VectorAgentSearch",
 ]
openai_sdk_helpers/agent/search/vector.py
CHANGED

@@ -7,6 +7,7 @@ from typing import Any, Callable, Dict, List, Optional
 
 from agents import custom_span, gen_trace_id, trace
 
+from ...structure.prompt import PromptStructure
 from ...structure.vector_search import (
     VectorSearchItemStructure,
     VectorSearchItemResultStructure,
@@ -15,6 +16,7 @@ from ...structure.vector_search import (
     VectorSearchPlanStructure,
     VectorSearchReportStructure,
 )
+from ...tools import tool_handler_factory
 from ...vector_storage import VectorStorage
 from ..config import AgentConfiguration
 from ..utils import run_coroutine_agent_sync
@@ -23,7 +25,7 @@ from .base import SearchPlanner, SearchToolAgent, SearchWriter
 MAX_CONCURRENT_SEARCHES = 10
 
 
-class
+class VectorAgentPlanner(SearchPlanner[VectorSearchPlanStructure]):
     """Plan vector searches to satisfy a user query.
 
     Parameters
@@ -241,7 +243,7 @@ class VectorSearchWriter(SearchWriter[VectorSearchReportStructure]):
     )
 
 
-class VectorSearch:
+class VectorAgentSearch:
     """Manage the complete vector search workflow.
 
     This high-level agent orchestrates a multi-step research process that plans
@@ -292,6 +294,8 @@ class VectorSearch:
         Execute the research workflow asynchronously.
     run_agent_sync(search_query)
         Execute the research workflow synchronously.
+    as_response_tool(vector_store_name, tool_name, tool_description)
+        Build a Responses API tool definition and handler.
     run_vector_agent(search_query)
         Convenience asynchronous entry point for the workflow.
     run_vector_agent_sync(search_query)
@@ -306,9 +310,9 @@ class VectorSearch:
     def __init__(
         self,
         *,
+        vector_store_name: str,
         prompt_dir: Optional[Path] = None,
         default_model: Optional[str] = None,
-        vector_store_name: Optional[str] = None,
         max_concurrent_searches: int = MAX_CONCURRENT_SEARCHES,
         vector_storage: Optional[VectorStorage] = None,
         vector_storage_factory: Optional[Callable[[str], VectorStorage]] = None,
@@ -336,7 +340,7 @@ class VectorSearch:
         """
         trace_id = gen_trace_id()
         with trace("VectorSearch trace", trace_id=trace_id):
-            planner =
+            planner = VectorAgentPlanner(
                 prompt_dir=self._prompt_dir, default_model=self._default_model
             )
             tool = VectorSearchTool(
@@ -383,45 +387,53 @@ class VectorSearch:
         """
         return run_coroutine_agent_sync(self.run_agent(search_query))
 
-
-
-
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str = "vector_search",
+        tool_description: str = "Run the vector search workflow.",
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return a Responses API tool handler and definition.
 
         Parameters
         ----------
-
-
+        vector_store_name : str
+            Name of the vector store to use for the response tool.
+        tool_name : str, default="vector_search"
+            Name to use for the response tool.
+        tool_description : str, default="Run the vector search workflow."
+            Description for the response tool.
 
         Returns
         -------
-
-
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
         """
-
+        search = VectorAgentSearch(
+            prompt_dir=self._prompt_dir,
+            default_model=self._default_model,
+            vector_store_name=self._vector_store_name,
+            max_concurrent_searches=self._max_concurrent_searches,
+            vector_storage=self._vector_storage,
+            vector_storage_factory=self._vector_storage_factory,
+        )
 
-
-
-        """Run :meth:`run_vector_agent` synchronously for ``search_query``.
+        def _run_search(prompt: str) -> VectorSearchStructure:
+            return run_coroutine_agent_sync(search.run_agent(search_query=prompt))
 
-
-
-
-
-
-        Returns
-        -------
-        VectorSearchStructure
-            Completed research output.
-        """
-        return run_coroutine_agent_sync(
-            VectorSearch.run_vector_agent(search_query=search_query)
+        tool_handler = {
+            tool_name: tool_handler_factory(_run_search, input_model=PromptStructure)
+        }
+        tool_definition = PromptStructure.response_tool_definition(
+            tool_name, tool_description=tool_description
         )
+        return tool_handler, tool_definition
 
 
 __all__ = [
     "MAX_CONCURRENT_SEARCHES",
-    "
+    "VectorAgentPlanner",
     "VectorSearchTool",
     "VectorSearchWriter",
-    "VectorSearch",
+    "VectorAgentSearch",
 ]
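
VectorSearch is renamed to VectorAgentSearch, vector_store_name becomes a required keyword-only constructor argument, and the whole workflow can now be packaged as a Responses API tool. A sketch under those assumptions; the store name is a placeholder:

from openai_sdk_helpers import VectorAgentSearch

search = VectorAgentSearch(vector_store_name="project_docs")

# Synchronous research run over the named vector store.
report = search.run_agent_sync("What does the design doc say about retries?")

# New in 0.4.2: handler + definition pair for use with a ResponseBase.
tool_handler, tool_definition = search.as_response_tool()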
openai_sdk_helpers/agent/search/web.py
CHANGED

@@ -3,12 +3,13 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Union
 
 from agents import custom_span, gen_trace_id, trace
 from agents.model_settings import ModelSettings
 from agents.tool import WebSearchTool
 
+from ...structure.prompt import PromptStructure
 from ...structure.web_search import (
     WebSearchItemStructure,
     WebSearchItemResultStructure,
@@ -16,6 +17,7 @@ from ...structure.web_search import (
     WebSearchPlanStructure,
     WebSearchReportStructure,
 )
+from ...tools import tool_handler_factory
 from ..config import AgentConfiguration
 from ..utils import run_coroutine_agent_sync
 from .base import SearchPlanner, SearchToolAgent, SearchWriter
@@ -242,6 +244,8 @@ class WebAgentSearch:
         Execute the research workflow asynchronously.
     run_agent_sync(search_query)
         Execute the research workflow synchronously.
+    as_response_tool(tool_name, tool_description)
+        Build a Responses API tool definition and handler.
     run_web_agent_async(search_query)
         Convenience asynchronous entry point for the workflow.
     run_web_agent_sync(search_query)
@@ -301,7 +305,7 @@ class WebAgentSearch:
         )
 
     def run_agent_sync(self, search_query: str) -> WebSearchStructure:
-        """
+        """Execute the entire research workflow for ``search_query`` synchronously.
 
         Parameters
         ----------
@@ -312,41 +316,41 @@ class WebAgentSearch:
         -------
         WebSearchStructure
             Completed research output.
+
         """
         return run_coroutine_agent_sync(self.run_agent_async(search_query))
 
-
-
+    def as_response_tool(
+        self,
+        *,
+        tool_name: str = "web_search",
+        tool_description: str = "Run the web search workflow.",
+    ) -> tuple[dict[str, Callable[..., Any]], dict[str, Any]]:
+        """Return a Responses API tool handler and definition.
 
         Parameters
         ----------
-
-
+        tool_name : str, default="web_search"
+            Name to use for the response tool.
+        tool_description : str, default="Run the web search workflow."
+            Description for the response tool.
 
         Returns
         -------
-
-
+        tuple[dict[str, Callable[..., Any]], dict[str, Any]]
+            Tool handler mapping and tool definition for Responses API usage.
         """
-        return await self.run_agent_async(search_query=search_query)
 
-
-
-        """Run :meth:`run_web_agent_async` synchronously for ``search_query``.
+        def _run_search(prompt: str) -> WebSearchStructure:
+            return run_coroutine_agent_sync(self.run_agent_async(search_query=prompt))
 
-
-
-
-
-
-        Returns
-        -------
-        WebSearchStructure
-            Completed research output.
-        """
-        return run_coroutine_agent_sync(
-            WebAgentSearch().run_web_agent_async(search_query=search_query)
+        tool_handler = {
+            tool_name: tool_handler_factory(_run_search, input_model=PromptStructure)
+        }
+        tool_definition = PromptStructure.response_tool_definition(
+            tool_name, tool_description=tool_description
         )
+        return tool_handler, tool_definition
 
 
 __all__ = [
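
WebAgentSearch gains the same as_response_tool() hook. A sketch of wiring it into a ResponseBase, assuming the default WebAgentSearch() constructor and the ResponseBase keyword arguments shown in agent/base.py above:

from openai_sdk_helpers import OpenAISettings, WebAgentSearch
from openai_sdk_helpers.response.base import ResponseBase

tool_handler, tool_definition = WebAgentSearch().as_response_tool()

response = ResponseBase(
    name="researcher",
    instructions="Use the web_search tool for questions about current events.",
    tools=[tool_definition],
    output_structure=None,
    tool_handlers=tool_handler,
    openai_settings=OpenAISettings.from_env(),
)
answer = response.run_sync("Summarize this week's Python packaging news.")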
openai_sdk_helpers/environment.py
CHANGED

@@ -18,10 +18,15 @@ get_data_path(name)
 
 from __future__ import annotations
 
+import os
+import os
 from pathlib import Path
+from dotenv import load_dotenv
+
 
 from openai_sdk_helpers.utils import ensure_directory
 
+load_dotenv()
 DATETIME_FMT = "%Y%m%d_%H%M%S"
 DEFAULT_MODEL = "gpt-4o-mini"
 
@@ -54,3 +59,20 @@ def get_data_path(name: str) -> Path:
     base = Path(__file__).parent.parent.parent / "data"
     path = base / name
     return ensure_directory(path)
+
+
+def get_model() -> str:
+    """Return the default model identifier.
+
+    Returns
+    -------
+    str
+        Default OpenAI model identifier.
+
+    Examples
+    --------
+    >>> from openai_sdk_helpers.environment import _get_default_model
+    >>> _get_default_model()
+    'gpt-4o-mini'
+    """
+    return os.getenv("DEFAULT_MODEL", DEFAULT_MODEL)
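
get_model() is a plain environment lookup with a packaged fallback. A sketch, assuming no .env file in the working directory overrides DEFAULT_MODEL:

import os

from openai_sdk_helpers import get_model

os.environ["DEFAULT_MODEL"] = "gpt-4o"
assert get_model() == "gpt-4o"

del os.environ["DEFAULT_MODEL"]
assert get_model() == "gpt-4o-mini"  # falls back to the DEFAULT_MODEL constant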
openai_sdk_helpers/response/config.py
CHANGED

@@ -10,17 +10,17 @@ from ..config import OpenAISettings
 from ..structure.base import StructureBase
 from ..response.base import ResponseBase, ToolHandler
 from ..utils.json.data_class import DataclassJSONSerializable
-from ..utils.registry import BaseRegistry
+from ..utils.registry import RegistryBase
 from ..utils.instructions import resolve_instructions_from_path
 
 TIn = TypeVar("TIn", bound="StructureBase")
 TOut = TypeVar("TOut", bound="StructureBase")
 
 
-class ResponseRegistry(BaseRegistry["ResponseConfiguration"]):
+class ResponseRegistry(RegistryBase["ResponseConfiguration"]):
     """Registry for managing ResponseConfiguration instances.
 
-    Inherits from BaseRegistry to provide centralized storage and retrieval
+    Inherits from RegistryBase to provide centralized storage and retrieval
     of response configurations, enabling reusable response specs across the application.
 
     Examples
@@ -108,8 +108,6 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
     -------
     __post_init__()
         Validate configuration invariants and enforce StructureBase subclassing.
-    instructions_text
-        Return the resolved instruction content as a string.
     to_json()
         Return a JSON-compatible dict representation (inherited from JSONSerializable).
     to_json_file(filepath)
@@ -137,7 +135,8 @@
     input_structure: Optional[Type[TIn]]
     output_structure: Optional[Type[TOut]]
     system_vector_store: Optional[list[str]] = None
-    add_output_instructions: bool =
+    add_output_instructions: bool = False
+    add_web_search_tool: bool = False
 
     def __post_init__(self) -> None:
         """
@@ -185,7 +184,7 @@ class ResponseConfiguration(DataclassJSONSerializable, Generic[TIn, TOut]):
             raise TypeError("Configuration.tools must be a Sequence or None")
 
     @property
-    def instructions_text(self) -> str:
+    def get_resolved_instructions(self) -> str:
         """Return the resolved instruction text.
 
         Returns
@@ -204,6 +203,20 @@
 
         return resolved_instructions
 
+    @property
+    def get_resolved_tools(self) -> list:
+        """Return the complete list of tools, including optional web search tool.
+
+        Returns
+        -------
+        list
+            List of tool definitions, including web search tool if enabled.
+        """
+        tools = self.tools or []
+        if self.add_web_search_tool:
+            tools = tools + [{"type": "web_search"}]
+        return tools
+
     def gen_response(
         self,
         *,
@@ -229,8 +242,8 @@
         """
         return ResponseBase[TOut](
             name=self.name,
-            instructions=self.
-            tools=self.
+            instructions=self.get_resolved_instructions,
+            tools=self.get_resolved_tools,
             output_structure=self.output_structure,
             system_vector_store=self.system_vector_store,
             data_path=data_path,
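
ResponseConfiguration gains add_web_search_tool and the get_resolved_tools property that appends a web_search tool definition when the flag is set. A sketch; the required constructor fields mirror planner.py/prompter.py below, and WebSearchStructure is the top-level export:

from openai_sdk_helpers import WebSearchStructure
from openai_sdk_helpers.response.config import ResponseConfiguration

config = ResponseConfiguration(
    name="news_reporter",
    instructions="Answer with up-to-date information.",
    tools=None,
    input_structure=None,
    output_structure=WebSearchStructure,
    add_web_search_tool=True,
)

# get_resolved_tools appends {"type": "web_search"} when the flag is set.
assert {"type": "web_search"} in config.get_resolved_tools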
openai_sdk_helpers/response/planner.py
ADDED

@@ -0,0 +1,12 @@
+"""Planner response configuration."""
+
+from ..structure.plan.plan import PlanStructure
+from .config import ResponseConfiguration
+
+PLANNER = ResponseConfiguration(
+    name="planner",
+    instructions="Generates structured prompts based on user input.",
+    tools=None,
+    input_structure=None,
+    output_structure=PlanStructure,
+)
openai_sdk_helpers/response/prompter.py
ADDED

@@ -0,0 +1,12 @@
+"""Prompter response configuration."""
+
+from .config import ResponseConfiguration
+from ..structure.prompt import PromptStructure
+
+PROMPTER = ResponseConfiguration(
+    name="prompter",
+    instructions="Generates structured prompts based on user input.",
+    tools=None,
+    input_structure=None,
+    output_structure=PromptStructure,
+)
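
planner.py and prompter.py are new modules that ship ready-made ResponseConfiguration presets. A small sketch that only reads their fields; how they are turned into live responses (the gen_response keyword arguments) is not visible in this diff:

from openai_sdk_helpers.response.planner import PLANNER
from openai_sdk_helpers.response.prompter import PROMPTER

assert PLANNER.name == "planner"
assert PROMPTER.name == "prompter"

# Both resolve instructions and tools like any other ResponseConfiguration.
print(PLANNER.get_resolved_instructions)
print(PROMPTER.output_structure)  # PromptStructure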
openai_sdk_helpers/streamlit_app/__init__.py
CHANGED

@@ -8,23 +8,23 @@ Classes
 -------
 StreamlitAppConfig
     Validated configuration for Streamlit chat applications.
+StreamlitAppRegistry
+    Registry for storing Streamlit app configurations.
 
 Functions
 ---------
-load_app_config
-    Load and validate configuration from a Python module.
 _load_configuration
     Load configuration with user-friendly error handling for Streamlit UI.
 """
 
 from .config import (
     StreamlitAppConfig,
+    StreamlitAppRegistry,
     _load_configuration,
-    load_app_config,
 )
 
 __all__ = [
     "StreamlitAppConfig",
+    "StreamlitAppRegistry",
     "_load_configuration",
-    "load_app_config",
 ]
openai_sdk_helpers/streamlit_app/config.py
CHANGED

@@ -15,7 +15,7 @@ from pydantic import ConfigDict, Field, field_validator, model_validator
 
 from openai_sdk_helpers.response.base import ResponseBase
 from openai_sdk_helpers.structure.base import StructureBase
-from openai_sdk_helpers.utils import ensure_list
+from openai_sdk_helpers.utils import RegistryBase, ensure_list
 from ..utils.json import BaseModelJSONSerializable
 
 
@@ -29,6 +29,8 @@ class StreamlitAppConfig(BaseModelJSONSerializable):
 
     Attributes
     ----------
+    name : str
+        Unique configuration identifier. Default is ``"streamlit_app"``.
     response : ResponseBase, type[ResponseBase], Callable, or None
         Response handler as an instance, class, or callable factory.
     display_title : str
@@ -48,8 +50,6 @@
         Return configured system vector stores as a list.
     create_response()
         Instantiate and return the configured ResponseBase.
-    load_app_config(config_path)
-        Load, validate, and return configuration from a Python module.
 
     Examples
     --------
@@ -63,6 +63,10 @@
 
     model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True)
 
+    name: str = Field(
+        default="streamlit_app",
+        description="Unique configuration identifier used for registry lookup.",
+    )
     response: ResponseBase[StructureBase] | type[ResponseBase] | Callable | None = (
         Field(
             default=None,
@@ -225,6 +229,39 @@
         """
         return _instantiate_response(self.response)
 
+
+class StreamlitAppRegistry(RegistryBase[StreamlitAppConfig]):
+    """Registry for managing StreamlitAppConfig instances.
+
+    Inherits from RegistryBase to provide centralized storage and retrieval
+    of Streamlit app configurations, enabling reuse across applications.
+
+    Methods
+    -------
+    register(config)
+        Add a configuration to the registry.
+    get(name)
+        Retrieve a configuration by name.
+    list_names()
+        Return all registered configuration names.
+    clear()
+        Remove all registered configurations.
+    save_to_directory(path)
+        Export all registered configurations to JSON files.
+    load_from_directory(path)
+        Load configurations from JSON files in a directory.
+    load_app_config(config_path)
+        Load, validate, and return configuration from a Python module.
+
+    Examples
+    --------
+    >>> registry = StreamlitAppRegistry()
+    >>> config = StreamlitAppConfig(response=MyResponse)
+    >>> registry.register(config)
+    >>> registry.get(config.name)
+    StreamlitAppConfig(...)
+    """
+
     @staticmethod
     def load_app_config(
         config_path: Path,
@@ -258,7 +295,7 @@
         Examples
         --------
         >>> from pathlib import Path
-        >>> config =
+        >>> config = StreamlitAppRegistry.load_app_config(
         ...     Path("./my_config.py")
         ... )
         """
@@ -433,36 +470,10 @@ def _config_from_mapping(raw_config: dict) -> StreamlitAppConfig:
     return StreamlitAppConfig(**config_kwargs)
 
 
-def load_app_config(
-    config_path: Path,
-) -> StreamlitAppConfig:
-    """Load and validate Streamlit configuration from a Python module.
-
-    Convenience function that proxies to StreamlitAppConfig.load_app_config
-    for backward compatibility.
-
-    Parameters
-    ----------
-    config_path : Path
-        Filesystem path to the configuration module.
-
-    Returns
-    -------
-    StreamlitAppConfig
-        Validated configuration loaded from the module.
-
-    Examples
-    --------
-    >>> from pathlib import Path
-    >>> config = load_app_config(Path("./my_config.py"))
-    """
-    return StreamlitAppConfig.load_app_config(config_path=config_path)
-
-
 def _load_configuration(config_path: Path) -> StreamlitAppConfig:
     """Load configuration with user-friendly error handling for Streamlit.
 
-    Wraps
+    Wraps StreamlitAppRegistry.load_app_config with exception handling that
     displays errors in the Streamlit UI and halts execution gracefully.
 
     Parameters
@@ -487,7 +498,7 @@ def _load_configuration(config_path: Path) -> StreamlitAppConfig:
     than raising exceptions that crash the app.
     """
     try:
-        return
+        return StreamlitAppRegistry.load_app_config(config_path=config_path)
     except Exception as exc:  # pragma: no cover - surfaced in UI
         import streamlit as st  # type: ignore[import-not-found]
 
@@ -498,6 +509,6 @@
 
 __all__ = [
     "StreamlitAppConfig",
-    "
+    "StreamlitAppRegistry",
     "_load_configuration",
 ]
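
The module-level load_app_config() helper is gone; loading now goes through the new StreamlitAppRegistry. A sketch based on the docstring example above, assuming ./my_config.py defines a valid configuration module:

from pathlib import Path

from openai_sdk_helpers.streamlit_app import StreamlitAppRegistry

config = StreamlitAppRegistry.load_app_config(Path("./my_config.py"))
print(config.name, config.display_title)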
openai_sdk_helpers/streamlit_app/streamlit_web_search.py
CHANGED

@@ -46,7 +46,7 @@ class StreamlitWebSearch(ResponseBase[WebSearchStructure]):
         async def perform_search(tool) -> str:
             """Perform a web search and return structured results."""
             structured_data = PromptStructure.from_tool_arguments(tool.arguments)
-            web_result = await WebAgentSearch(default_model=DEFAULT_MODEL).
+            web_result = await WebAgentSearch(default_model=DEFAULT_MODEL).run_agent_async(
                 structured_data.prompt
             )
             payload = coerce_jsonable(web_result)
openai_sdk_helpers/utils/__init__.py
CHANGED

@@ -55,7 +55,7 @@ from .json import (
     get_module_qualname,
     to_jsonable,
 )
-from .registry import BaseRegistry
+from .registry import RegistryBase
 
 from .path_utils import check_filepath, ensure_directory
 from openai_sdk_helpers.logging_config import log
@@ -135,5 +135,5 @@ __all__ = [
     "create_file_data_url",
     "is_image_file",
     # Registry
-    "BaseRegistry",
+    "RegistryBase",
 ]
openai_sdk_helpers/utils/json/ref.py
CHANGED

@@ -32,6 +32,7 @@ def get_module_qualname(obj: Any) -> tuple[str, str] | None:
     ...     pass
     >>> get_module_qualname(MyClass)
     ('__main__', 'MyClass')
+
     """
     module = getattr(obj, "__module__", None)
     qualname = getattr(obj, "__qualname__", None)
@@ -59,6 +60,7 @@ def encode_module_qualname(obj: Any) -> dict[str, Any] | None:
     ...     pass
     >>> encode_module_qualname(MyClass)
     {'module': '__main__', 'qualname': 'MyClass'}
+
     """
     result = get_module_qualname(obj)
     if result is None:
@@ -85,6 +87,7 @@ def decode_module_qualname(ref: dict[str, Any]) -> Any | None:
     >>> ref = {'module': 'pathlib', 'qualname': 'Path'}
     >>> decode_module_qualname(ref)
     <class 'pathlib.Path'>
+
     """
     if not isinstance(ref, dict):
         return None
openai_sdk_helpers/utils/registry.py
CHANGED

@@ -4,15 +4,42 @@ from __future__ import annotations
 
 import warnings
 from pathlib import Path
-from typing import Generic, TypeVar
+from typing import Generic, Protocol, TypeVar
+from typing_extensions import Self
 
-from ..utils.json.data_class import DataclassJSONSerializable
 from .path_utils import ensure_directory
 
-T = TypeVar("T", bound=DataclassJSONSerializable)
 
+class RegistrySerializable(Protocol):
+    """Protocol describing serializable registry entries.
 
-class BaseRegistry(Generic[T]):
+    Methods
+    -------
+    to_json_file(filepath)
+        Write the instance to a JSON file path.
+    from_json_file(filepath)
+        Load an instance from a JSON file path.
+    """
+
+    @property
+    def name(self) -> str:
+        """Return the configuration name."""
+        ...
+
+    def to_json_file(self, filepath: Path | str) -> str:
+        """Write serialized JSON data to a file path."""
+        ...
+
+    @classmethod
+    def from_json_file(cls, filepath: Path | str) -> Self:
+        """Load an instance from a JSON file."""
+        ...
+
+
+T = TypeVar("T", bound=RegistrySerializable)
+
+
+class RegistryBase(Generic[T]):
     """Base registry for managing configuration instances.
 
     Provides centralized storage and retrieval of configurations,
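
RegistryBase no longer requires entries to inherit DataclassJSONSerializable; anything satisfying the RegistrySerializable protocol (a name plus to_json_file/from_json_file) qualifies. A sketch with a hypothetical ToolConfig entry; register() and get() are assumed from the registry docstrings elsewhere in this diff:

import json
from dataclasses import asdict, dataclass
from pathlib import Path

from openai_sdk_helpers.utils.registry import RegistryBase


@dataclass
class ToolConfig:
    name: str

    def to_json_file(self, filepath):
        # Persist the dataclass fields as JSON, matching the protocol's contract.
        Path(filepath).write_text(json.dumps(asdict(self)))
        return str(filepath)

    @classmethod
    def from_json_file(cls, filepath):
        return cls(**json.loads(Path(filepath).read_text()))


registry: RegistryBase[ToolConfig] = RegistryBase()
registry.register(ToolConfig(name="demo"))  # assumed RegistryBase API
print(registry.get("demo"))                 # assumed RegistryBase API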
{openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/RECORD
CHANGED

@@ -1,9 +1,9 @@
-openai_sdk_helpers/__init__.py,sha256=
+openai_sdk_helpers/__init__.py,sha256=19MEuQFgMqr6YqEc0QdmNe5YTg5JJVVCIisrYLFbqGk,4766
 openai_sdk_helpers/cli.py,sha256=YnQz-IcAqcBdh8eCCxVYa7NHDuHgHaU-PJ4FWPvkz58,8278
 openai_sdk_helpers/config.py,sha256=xK_u0YNKgtPrLrZrVr4F4k0CvSuYbsmkqqw9mCMdyF8,10932
 openai_sdk_helpers/context_manager.py,sha256=QqlrtenwKoz2krY0IzuToKdTX1HptUYtIEylxieybgY,6633
 openai_sdk_helpers/deprecation.py,sha256=VF0VDDegawYhsu5f-vE6dop9ob-jv8egxsm0KsPvP9E,4753
-openai_sdk_helpers/environment.py,sha256=
+openai_sdk_helpers/environment.py,sha256=9SYGAgf6dp0aknDdvcnSD40vJWONZsVhO-i8Ayo3jpg,1906
 openai_sdk_helpers/errors.py,sha256=0TLrcpRXPBvk2KlrU5I1VAQl-sYy-d15h_SMDkEawvI,2757
 openai_sdk_helpers/files_api.py,sha256=uMKHvGg1Od0J95Izl3AG9ofQYq8EDJXEty7zP0oKjJM,12569
 openai_sdk_helpers/logging_config.py,sha256=JcR0FTWht1tYdwD-bXH835pr0JV0RwHfY3poruiZGHM,795
@@ -11,9 +11,9 @@ openai_sdk_helpers/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openai_sdk_helpers/retry.py,sha256=J10oQYphfzDXm3BnLoXwxk7PAhN93TC2LQOv0VDGOwI,6533
 openai_sdk_helpers/tools.py,sha256=Awj5htt1ImBbNToM1u6qdrIZ-7MiPZAXZ_oKKiWivy8,10547
 openai_sdk_helpers/types.py,sha256=xzldCRfwCZ3rZl18IBmfgA-PVdoZKSWNrlSIhirumSo,1451
-openai_sdk_helpers/agent/__init__.py,sha256=
-openai_sdk_helpers/agent/base.py,sha256=
-openai_sdk_helpers/agent/config.py,sha256=
+openai_sdk_helpers/agent/__init__.py,sha256=39nYVK8okZv_srC86HStwtKirkH1_FXkacoqfV73naA,1070
+openai_sdk_helpers/agent/base.py,sha256=iv14LURB5PFcbRHuP1lWyj8JbKvDqM1m1Tf1mOi6CA8,27080
+openai_sdk_helpers/agent/config.py,sha256=NlSE1T1T3fUOu4GJzwjmrVGxB1aI969yYg6sFXgwHCI,14611
 openai_sdk_helpers/agent/coordination.py,sha256=VTzyl4RV1q4ugiyFW4Fj7pOAVVO0bMRD63PfQRDwfoQ,18030
 openai_sdk_helpers/agent/prompt_utils.py,sha256=-1M66tqQxh9wWCFg6X-K7cCcqauca3yA04ZjvOpN3bA,337
 openai_sdk_helpers/agent/runner.py,sha256=aOVN1OYKK5_u7oFBqRCOOeTgcb-lLl4kZGxuPLmJrMw,4884
@@ -21,10 +21,10 @@ openai_sdk_helpers/agent/summarizer.py,sha256=lg_PLB1DSHox3PNDgiCzvCPM5VoCUbKEMG
 openai_sdk_helpers/agent/translator.py,sha256=3u7er1GhUGdy7OMa3A_vyqFFZfev3XBCZW_6w5OwYVc,6286
 openai_sdk_helpers/agent/utils.py,sha256=DTD5foCqGYfXf13F2bZMYIQROl7SbDSy5GDPGi0Zl-0,1089
 openai_sdk_helpers/agent/validation.py,sha256=6NHZIFaUOqRZeYqvRBnDc_uApAV3YHJnOhLHKbVUsi0,5094
-openai_sdk_helpers/agent/search/__init__.py,sha256=
+openai_sdk_helpers/agent/search/__init__.py,sha256=LXXzEcX2MU7_htHRdRCGPw0hsr9CrZn0ESii7GZJMBw,806
 openai_sdk_helpers/agent/search/base.py,sha256=VokTw3-V2yxGzm2WzlcvU100h3UaeyGslCFwIgMvJwI,10146
-openai_sdk_helpers/agent/search/vector.py,sha256=
-openai_sdk_helpers/agent/search/web.py,sha256=
+openai_sdk_helpers/agent/search/vector.py,sha256=A1HskDI6YVd3D9IQncowgiWUy9ptlMlSJhrBRDyqroM,15167
+openai_sdk_helpers/agent/search/web.py,sha256=EQ0Rgcz21Rm9bDGPr8XPlDj34_nH2wnB7ER9rBy48Ak,11199
 openai_sdk_helpers/enums/__init__.py,sha256=aFf79C4JBeLC3kMlJfSpehyjx5uNCtW6eK5rD6ZFfhM,322
 openai_sdk_helpers/enums/base.py,sha256=cNllDtzcgI0_eZYXxFko14yhxwicX6xbeDfz9gFE3qo,2753
 openai_sdk_helpers/prompt/__init__.py,sha256=MOqgKwG9KLqKudoKRlUfLxiSmdOi2aD6hNrWDFqLHkk,418
@@ -34,16 +34,18 @@ openai_sdk_helpers/prompt/translator.jinja,sha256=SZhW8ipEzM-9IA4wyS_r2wIMTAclWr
 openai_sdk_helpers/prompt/validator.jinja,sha256=6t8q_IdxFd3mVBGX6SFKNOert1Wo3YpTOji2SNEbbtE,547
 openai_sdk_helpers/response/__init__.py,sha256=Rh3tBygneOhS-Er_4dtX4Xa69ukvxYv01brq26VpgwQ,1886
 openai_sdk_helpers/response/base.py,sha256=OA1p9h6EIzwt8VCWFXEalaQHOe0_eZDefqs5jQKu-vU,34844
-openai_sdk_helpers/response/config.py,sha256=
+openai_sdk_helpers/response/config.py,sha256=ugZIP29krecf6JXiwkrc1nBDCdT_C9DSOCdPkLRN4wY,9305
 openai_sdk_helpers/response/files.py,sha256=6iHXeNZg4R08ilQ7D53qIJDQGYPpTLcByAhNJlEwbZ4,13226
 openai_sdk_helpers/response/messages.py,sha256=qX3sW79rLuJEys28zyv5MovZikwGOaLevzdVN0VYMRE,10104
+openai_sdk_helpers/response/planner.py,sha256=OfqrANheofY2155kVVfAWPPAHlnSnhaF0MLUHwNgPBU,333
+openai_sdk_helpers/response/prompter.py,sha256=vaHrNAAB9Z5WYwQeTKfOkAoH6DaFx1aRcywngqr47Pc,337
 openai_sdk_helpers/response/runner.py,sha256=Rg8XmxU5UwxJc3MjPlYlXWDimxy_cjxzefGiruNZK6s,4269
 openai_sdk_helpers/response/tool_call.py,sha256=c9Filh4IG5H_RWuJlYl6KUZDaF7mCjkabFRQMNiz7zM,7422
 openai_sdk_helpers/response/vector_store.py,sha256=cG5Mzdhjw5FsX1phgclIGz2MQ8f8uMKBaage1O2EZQU,3074
-openai_sdk_helpers/streamlit_app/__init__.py,sha256=
+openai_sdk_helpers/streamlit_app/__init__.py,sha256=DIXClgbzncsex2vnXUGjBwvykazx4-Bz089beZiq8vc,805
 openai_sdk_helpers/streamlit_app/app.py,sha256=jNkMQ4zkfojP501qk_vncyLN4TymiDXxA3cXkUvBfsw,17402
-openai_sdk_helpers/streamlit_app/config.py,sha256=
-openai_sdk_helpers/streamlit_app/streamlit_web_search.py,sha256
+openai_sdk_helpers/streamlit_app/config.py,sha256=t1fylt53eVmnNOfBXwpfDyG-Jji9JBUb0ZyrtUWBZ1s,16594
+openai_sdk_helpers/streamlit_app/streamlit_web_search.py,sha256=-5T22a7XbNDjQxC3pLySH85iAdlqSM2ZrR4ZIIYk_KA,2808
 openai_sdk_helpers/structure/__init__.py,sha256=jROw0IbXYVRD2Eb3dBMsB6amQZrX8X7XSgGh_zjsZWc,3469
 openai_sdk_helpers/structure/agent_blueprint.py,sha256=VyJWkgPNzAYKRDMeR1M4kE6qqQURnwqtrrEn0TRJf0g,9698
 openai_sdk_helpers/structure/base.py,sha256=7JuHxKkLR5gP0RWGQIjOQlvySfain6LrB4-zHb0oFxo,25298
@@ -60,7 +62,7 @@ openai_sdk_helpers/structure/plan/helpers.py,sha256=Vc6dBTMFrNWlsaCTpEImEIKjfFq4
 openai_sdk_helpers/structure/plan/plan.py,sha256=CStfSfCdcv7HfLWV_G09xElJvq_kAKi_6JDkB3I7cSI,9663
 openai_sdk_helpers/structure/plan/task.py,sha256=FSdt2OJ_arC60zMoWIUHMT3U1syWM_7svyTpOIwiRSM,4580
 openai_sdk_helpers/structure/plan/types.py,sha256=7y9QEVdZreQUXV7n-R4RoNZzw5HeOVbJGWx9QkSfuNY,418
-openai_sdk_helpers/utils/__init__.py,sha256=
+openai_sdk_helpers/utils/__init__.py,sha256=8SghfmiFhNhMj8Wuop1SAtEt1F8QJb_r4jhi5DtSCDE,3670
 openai_sdk_helpers/utils/async_utils.py,sha256=9KbPEVfi6IXdbwkTUE0h5DleK8TI7I6P_VPL8UgUv98,3689
 openai_sdk_helpers/utils/coercion.py,sha256=Pq1u7tAbD7kTZ84lK-7Fb9CyYKKKQt4fypG5BlSI6oQ,3774
 openai_sdk_helpers/utils/deprecation.py,sha256=VF0VDDegawYhsu5f-vE6dop9ob-jv8egxsm0KsPvP9E,4753
@@ -68,19 +70,19 @@ openai_sdk_helpers/utils/encoding.py,sha256=oDtlNGZ5p-edXiHW76REs-0-8NXkQNReKJdj
 openai_sdk_helpers/utils/instructions.py,sha256=trbjxjxv2otQR9VmBsPFk1_CJj8nA85Sgtj_5QmQOKI,942
 openai_sdk_helpers/utils/output_validation.py,sha256=3DC6Hr6vAFx_bQGaLsxkGN_LuKe_MugLpVogMBgG9tc,12621
 openai_sdk_helpers/utils/path_utils.py,sha256=NOSX7553cc8LqxbnvmdYjgDBi5Le2W2LuMINwFsTzyM,1969
-openai_sdk_helpers/utils/registry.py,sha256=
+openai_sdk_helpers/utils/registry.py,sha256=m59q6qm2IqXRdvdeKB-7H5tiUs1SB-aXTuyhTsVH5E4,6499
 openai_sdk_helpers/utils/validation.py,sha256=ZjnZNOy5AoFlszRxarNol6YZwfgw6LnwPtkCekZmwAU,7826
 openai_sdk_helpers/utils/json/__init__.py,sha256=wBm1DBgfy_n1uSUcnCPyqBn_cCq41mijjPigSMZJZqg,2118
 openai_sdk_helpers/utils/json/base_model.py,sha256=8j__oKly46RRekmRjwFZjAxBHhZkIjMADcJReKo-QsQ,5100
 openai_sdk_helpers/utils/json/data_class.py,sha256=hffMQQTNTwybuMTOtmKNzxd6kXrVyQen67F5BE_OGqE,6469
-openai_sdk_helpers/utils/json/ref.py,sha256=
+openai_sdk_helpers/utils/json/ref.py,sha256=FqBIRWIw33Up3rFyTlLYljcuUjg43f6Nu5wX3tOXn54,2809
 openai_sdk_helpers/utils/json/utils.py,sha256=iyc25tnObqXQJWPKLZMVts932GArdKer59KuC8aQKsY,5948
 openai_sdk_helpers/vector_storage/__init__.py,sha256=L5LxO09puh9_yBB9IDTvc1CvVkARVkHqYY1KX3inB4c,975
 openai_sdk_helpers/vector_storage/cleanup.py,sha256=ImWIE-9lli-odD8qIARvmeaa0y8ZD4pYYP-kT0O3178,3552
 openai_sdk_helpers/vector_storage/storage.py,sha256=A6zJDicObdSOVSlzhHVxEGq_tKO2_bNcsYi94xsKDNI,23655
 openai_sdk_helpers/vector_storage/types.py,sha256=jTCcOYMeOpZWvcse0z4T3MVs-RBOPC-fqWTBeQrgafU,1639
-openai_sdk_helpers-0.4.
-openai_sdk_helpers-0.4.
-openai_sdk_helpers-0.4.
-openai_sdk_helpers-0.4.
-openai_sdk_helpers-0.4.
+openai_sdk_helpers-0.4.2.dist-info/METADATA,sha256=h1-_VwnRxkgrCwRJMEwpqMe3TCeUuMQ0U-PwYoJrJkU,23557
+openai_sdk_helpers-0.4.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+openai_sdk_helpers-0.4.2.dist-info/entry_points.txt,sha256=gEOD1ZeXe8d2OP-KzUlG-b_9D9yUZTCt-GFW3EDbIIY,63
+openai_sdk_helpers-0.4.2.dist-info/licenses/LICENSE,sha256=CUhc1NrE50bs45tcXF7OcTQBKEvkUuLqeOHgrWQ5jaA,1067
+openai_sdk_helpers-0.4.2.dist-info/RECORD,,
{openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/WHEEL
File without changes

{openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/entry_points.txt
File without changes

{openai_sdk_helpers-0.4.0.dist-info → openai_sdk_helpers-0.4.2.dist-info}/licenses/LICENSE
File without changes