openai-sdk-helpers 0.6.2__tar.gz → 0.6.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/PKG-INFO +1 -1
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/pyproject.toml +1 -1
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/classifier.py +54 -103
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/classifier.jinja +5 -6
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/__init__.py +4 -2
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/base.py +100 -5
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/classification.py +194 -113
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/.gitignore +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/LICENSE +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/README.md +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/base.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/configuration.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/coordinator.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/files.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/runner.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/search/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/search/base.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/search/vector.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/search/web.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/summarizer.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/translator.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/utils.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/validator.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/cli.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/enums/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/enums/base.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/environment.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/errors.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/extract/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/extract/extractor.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/extract/generator.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/files_api.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/logging.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/base.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/extractor_config_agent_instructions.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/extractor_config_generator.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/extractor_config_generator_instructions.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/extractor_prompt_optimizer_agent_instructions.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/extractor_prompt_optimizer_request.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/summarizer.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/translator.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/validator.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/vector_planner.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/vector_search.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/vector_writer.jinja +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/py.typed +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/base.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/configuration.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/files.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/messages.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/planner.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/prompter.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/runner.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/tool_call.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/response/vector_store.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/settings.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/streamlit_app/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/streamlit_app/app.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/streamlit_app/configuration.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/agent_blueprint.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/extraction.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/plan/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/plan/enum.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/plan/helpers.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/plan/plan.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/plan/task.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/plan/types.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/prompt.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/responses.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/summary.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/translation.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/validation.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/vector_search.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/web_search.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/tools.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/types.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/async_utils.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/coercion.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/encoding.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/instructions.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/json/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/json/base_model.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/json/data_class.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/json/ref.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/json/utils.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/langextract.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/output_validation.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/path_utils.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/registry.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/utils/validation.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/vector_storage/__init__.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/vector_storage/cleanup.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/vector_storage/storage.py +0 -0
- {openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/vector_storage/types.py +0 -0
{openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/agent/classifier.py
RENAMED
@@ -10,12 +10,17 @@ from enum import Enum
 from pathlib import Path
 from typing import Any, Awaitable, Dict, Iterable, Optional, Sequence, cast
 
+from agents.model_settings import ModelSettings
+
 from ..structure import (
     ClassificationResult,
     ClassificationStep,
     ClassificationStopReason,
     StructureBase,
+    Taxonomy,
     TaxonomyNode,
+    format_path_identifier,
+    split_path_identifier,
 )
 from ..utils import ensure_list
 from .base import AgentBase
@@ -31,14 +36,14 @@ class TaxonomyClassifierAgent(AgentBase):
         Optional template file path for prompt rendering.
     model : str | None, default=None
        Model identifier to use for classification.
+    model_settings : ModelSettings | None, default=None
+        Optional model settings to apply to the classifier agent.
 
     Methods
     -------
-
-        Classify text by recursively walking the taxonomy tree.
-    run_async(input, context, max_depth, confidence_threshold, single_class)
+    run_async(input, context, max_depth, confidence_threshold)
         Classify text asynchronously using taxonomy traversal.
-    run_sync(input, context, max_depth, confidence_threshold
+    run_sync(input, context, max_depth, confidence_threshold)
         Classify text synchronously using taxonomy traversal.
 
     Examples
@@ -57,6 +62,7 @@ class TaxonomyClassifierAgent(AgentBase):
         *,
         template_path: Path | str | None = None,
         model: str | None = None,
+        model_settings: ModelSettings | None = None,
         taxonomy: TaxonomyNode | Sequence[TaxonomyNode],
     ) -> None:
         """Initialize the taxonomy classifier agent configuration.
@@ -67,6 +73,8 @@ class TaxonomyClassifierAgent(AgentBase):
             Optional template file path for prompt rendering.
         model : str | None, default=None
             Model identifier to use for classification.
+        model_settings : ModelSettings | None, default=None
+            Optional model settings to apply to the classifier agent.
         taxonomy : TaxonomyNode | Sequence[TaxonomyNode]
             Root taxonomy node or list of root nodes.
 
@@ -91,10 +99,11 @@ class TaxonomyClassifierAgent(AgentBase):
             template_path=resolved_template_path,
             output_structure=ClassificationStep,
             model=model,
+            model_settings=model_settings,
         )
         super().__init__(configuration=configuration)
 
-    async def
+    async def _run_agent(
         self,
         text: str,
         *,
@@ -102,7 +111,6 @@ class TaxonomyClassifierAgent(AgentBase):
         file_ids: str | Sequence[str] | None = None,
         max_depth: Optional[int] = None,
         confidence_threshold: float | None = None,
-        single_class: bool = False,
         session: Optional[Any] = None,
     ) -> ClassificationResult:
         """Classify ``text`` by recursively walking taxonomy levels.
@@ -119,8 +127,6 @@ class TaxonomyClassifierAgent(AgentBase):
             Maximum depth to traverse before stopping.
         confidence_threshold : float or None, default=None
             Minimum confidence required to accept a classification step.
-        single_class : bool, default=False
-            Whether to keep only the highest-priority selection per step.
         session : Session or None, default=None
             Optional session for maintaining conversation history across runs.
 
@@ -147,21 +153,17 @@ class TaxonomyClassifierAgent(AgentBase):
             file_ids=file_ids,
             max_depth=max_depth,
             confidence_threshold=confidence_threshold,
-            single_class=single_class,
             session=session,
             state=state,
         )
 
         final_nodes_value = state.final_nodes or None
-        final_node = state.final_nodes[0] if state.final_nodes else None
         stop_reason = _resolve_stop_reason(state)
         return ClassificationResult(
-            final_node=final_node,
             final_nodes=final_nodes_value,
             confidence=state.best_confidence,
             stop_reason=stop_reason,
-
-            path_nodes=state.path_nodes,
+            steps=state.steps,
         )
 
     async def run_async(
@@ -174,7 +176,6 @@ class TaxonomyClassifierAgent(AgentBase):
         file_ids: str | Sequence[str] | None = None,
         max_depth: Optional[int] = None,
         confidence_threshold: float | None = None,
-        single_class: bool = False,
     ) -> ClassificationResult:
         """Classify ``input`` asynchronously with taxonomy traversal.
 
@@ -194,8 +195,6 @@ class TaxonomyClassifierAgent(AgentBase):
             Maximum depth to traverse before stopping.
         confidence_threshold : float or None, default=None
             Minimum confidence required to accept a classification step.
-        single_class : bool, default=False
-            Whether to keep only the highest-priority selection per step.
 
         Returns
         -------
@@ -211,11 +210,10 @@ class TaxonomyClassifierAgent(AgentBase):
             "file_ids": file_ids,
             "max_depth": max_depth,
             "confidence_threshold": confidence_threshold,
-            "single_class": single_class,
         }
         if session is not None:
             kwargs["session"] = session
-        return await self.
+        return await self._run_agent(input, **kwargs)
 
     def run_sync(
         self,
@@ -227,7 +225,6 @@ class TaxonomyClassifierAgent(AgentBase):
         file_ids: str | Sequence[str] | None = None,
         max_depth: Optional[int] = None,
         confidence_threshold: float | None = None,
-        single_class: bool = False,
     ) -> ClassificationResult:
         """Classify ``input`` synchronously with taxonomy traversal.
 
@@ -247,8 +244,6 @@ class TaxonomyClassifierAgent(AgentBase):
             Maximum depth to traverse before stopping.
         confidence_threshold : float or None, default=None
             Minimum confidence required to accept a classification step.
-        single_class : bool, default=False
-            Whether to keep only the highest-priority selection per step.
 
         Returns
         -------
@@ -264,13 +259,12 @@ class TaxonomyClassifierAgent(AgentBase):
             "file_ids": file_ids,
             "max_depth": max_depth,
             "confidence_threshold": confidence_threshold,
-            "single_class": single_class,
         }
         if session is not None:
             kwargs["session"] = session
 
         async def runner() -> ClassificationResult:
-            return await self.
+            return await self._run_agent(input, **kwargs)
 
         try:
             asyncio.get_running_loop()
@@ -342,7 +336,6 @@ class TaxonomyClassifierAgent(AgentBase):
         file_ids: str | Sequence[str] | None,
         max_depth: Optional[int],
         confidence_threshold: float | None,
-        single_class: bool,
         session: Optional[Any],
         state: "_TraversalState",
     ) -> None:
@@ -364,8 +357,6 @@ class TaxonomyClassifierAgent(AgentBase):
             Maximum traversal depth before stopping.
         confidence_threshold : float or None
             Minimum confidence required to accept a classification step.
-        single_class : bool
-            Whether to keep only the highest-priority selection per step.
         session : Session or None
             Optional session for maintaining conversation history across runs.
         state : _TraversalState
@@ -380,7 +371,7 @@ class TaxonomyClassifierAgent(AgentBase):
         node_paths = _build_node_path_map(nodes, parent_path)
         template_context = _build_context(
             node_descriptors=_build_node_descriptors(node_paths),
-
+            steps=state.steps,
             depth=depth,
             context=context,
         )
@@ -392,7 +383,7 @@ class TaxonomyClassifierAgent(AgentBase):
             session=session,
         )
         step = _normalize_step_output(raw_step, step_structure)
-        state.
+        state.steps.append(step)
 
         if (
             confidence_threshold is not None
@@ -402,10 +393,6 @@ class TaxonomyClassifierAgent(AgentBase):
             return
 
         resolved_nodes = _resolve_nodes(node_paths, step)
-        if resolved_nodes:
-            if single_class:
-                resolved_nodes = resolved_nodes[:1]
-            state.path_nodes.extend(resolved_nodes)
 
         if step.stop_reason.is_terminal:
             if resolved_nodes:
@@ -419,8 +406,7 @@ class TaxonomyClassifierAgent(AgentBase):
         if not resolved_nodes:
             return
 
-
-        base_path_nodes_len = len(state.path_nodes)
+        base_steps_len = len(state.steps)
         child_tasks: list[tuple[Awaitable["_TraversalState"], int]] = []
         for node in resolved_nodes:
             if node.children:
@@ -439,7 +425,6 @@ class TaxonomyClassifierAgent(AgentBase):
                         file_ids=file_ids,
                         max_depth=max_depth,
                         confidence_threshold=confidence_threshold,
-                        single_class=single_class,
                         session=session,
                         state=sub_state,
                     ),
@@ -459,8 +444,7 @@ class TaxonomyClassifierAgent(AgentBase):
         for child_state, (_, base_final_nodes_len) in zip(
             child_states, child_tasks, strict=True
         ):
-            state.
-            state.path_nodes.extend(child_state.path_nodes[base_path_nodes_len:])
+            state.steps.extend(child_state.steps[base_steps_len:])
             state.final_nodes.extend(child_state.final_nodes[base_final_nodes_len:])
             state.best_confidence = _max_confidence(
                 state.best_confidence, child_state.best_confidence
@@ -514,6 +498,7 @@ class TaxonomyClassifierAgent(AgentBase):
         sub_agent = TaxonomyClassifierAgent(
             template_path=self._template_path,
             model=self._model,
+            model_settings=self._model_settings,
             taxonomy=list(nodes),
         )
         sub_agent._run_step_async = self._run_step_async
@@ -531,7 +516,6 @@ class TaxonomyClassifierAgent(AgentBase):
         file_ids: str | Sequence[str] | None,
         max_depth: Optional[int],
         confidence_threshold: float | None,
-        single_class: bool,
         session: Optional[Any],
         state: "_TraversalState",
     ) -> "_TraversalState":
@@ -557,8 +541,6 @@ class TaxonomyClassifierAgent(AgentBase):
             Maximum traversal depth before stopping.
         confidence_threshold : float or None
             Minimum confidence required to accept a classification step.
-        single_class : bool
-            Whether to keep only the highest-priority selection per step.
         session : Session or None
             Optional session for maintaining conversation history across runs.
         state : _TraversalState
@@ -578,7 +560,6 @@ class TaxonomyClassifierAgent(AgentBase):
             file_ids=file_ids,
             max_depth=max_depth,
             confidence_threshold=confidence_threshold,
-            single_class=single_class,
             session=session,
             state=state,
         )
@@ -589,8 +570,7 @@
 class _TraversalState:
     """Track recursive traversal state."""
 
-
-    path_nodes: list[TaxonomyNode] = field(default_factory=list)
+    steps: list[ClassificationStep] = field(default_factory=list)
     final_nodes: list[TaxonomyNode] = field(default_factory=list)
     best_confidence: float | None = None
     saw_max_depth: bool = False
@@ -612,8 +592,7 @@ def _copy_traversal_state(state: _TraversalState) -> _TraversalState:
         Cloned traversal state with copied collections.
     """
     return _TraversalState(
-
-        path_nodes=list(state.path_nodes),
+        steps=list(state.steps),
         final_nodes=list(state.final_nodes),
         best_confidence=state.best_confidence,
         saw_max_depth=state.saw_max_depth,
@@ -663,6 +642,8 @@ def _normalize_roots(
     list[TaxonomyNode]
         Normalized list of root nodes.
     """
+    if isinstance(taxonomy, Taxonomy):
+        return [node for node in taxonomy.children if node is not None]
     if isinstance(taxonomy, TaxonomyNode):
         return [taxonomy]
     return [node for node in taxonomy if node is not None]
@@ -682,7 +663,7 @@ def _default_template_path() -> Path:
 def _build_context(
     *,
     node_descriptors: Iterable[dict[str, Any]],
-
+    steps: Sequence[ClassificationStep],
     depth: int,
     context: Optional[Dict[str, Any]],
 ) -> Dict[str, Any]:
@@ -692,7 +673,7 @@ def _build_context(
     ----------
     node_descriptors : Iterable[dict[str, Any]]
         Node descriptors available at the current taxonomy level.
-
+    steps : Sequence[ClassificationStep]
         Steps recorded so far in the traversal.
     depth : int
         Current traversal depth.
@@ -704,9 +685,14 @@ def _build_context(
     dict[str, Any]
        Context dictionary for prompt rendering.
     """
+    summarized_steps = [
+        step.as_summary()
+        for step in steps
+        if step.selected_nodes and any(node is not None for node in step.selected_nodes)
+    ]
     template_context: Dict[str, Any] = {
         "taxonomy_nodes": list(node_descriptors),
-        "
+        "steps": summarized_steps,
         "depth": depth,
     }
     if context:
@@ -754,7 +740,7 @@ def _build_node_path_map(
     path_map: dict[str, TaxonomyNode] = {}
     seen: dict[str, int] = {}
    for node in nodes:
-        base_path =
+        base_path = format_path_identifier([*parent_path, node.label])
         count = seen.get(base_path, 0) + 1
         seen[base_path] = count
         path = f"{base_path} ({count})" if count > 1 else base_path
@@ -783,33 +769,12 @@ def _build_node_descriptors(
             {
                 "identifier": path_id,
                 "label": node.label,
-                "
+                "computed_description": node.computed_description,
             }
         )
     return descriptors
 
 
-def _format_path_identifier(path_segments: Sequence[str]) -> str:
-    """Format path segments into a safe identifier string.
-
-    Parameters
-    ----------
-    path_segments : Sequence[str]
-        Path segments to format.
-
-    Returns
-    -------
-    str
-        Escaped path identifier string.
-    """
-    delimiter = " > "
-    escape_token = "\\>"
-    escaped_segments = [
-        segment.replace(delimiter, escape_token) for segment in path_segments
-    ]
-    return delimiter.join(escaped_segments)
-
-
 def _build_taxonomy_enum(name: str, values: Sequence[str]) -> type[Enum]:
     """Build a safe Enum from taxonomy node values.
 
@@ -834,25 +799,6 @@ def _build_taxonomy_enum(name: str, values: Sequence[str]) -> type[Enum]:
     return cast(type[Enum], Enum(name, members))
 
 
-def _split_taxonomy_path(value: str) -> list[str]:
-    """Split a taxonomy identifier into its path segments.
-
-    Parameters
-    ----------
-    value : str
-        Taxonomy path identifier to split.
-
-    Returns
-    -------
-    list[str]
-        Path segments with escaped delimiters restored.
-    """
-    delimiter = " > "
-    escape_token = "\\>"
-    segments = value.split(delimiter)
-    return [segment.replace(escape_token, delimiter) for segment in segments]
-
-
 def _sanitize_enum_member(
     value: str,
     index: int,
@@ -875,7 +821,7 @@ def _sanitize_enum_member(
         Sanitized enum member name.
     """
     normalized_segments: list[str] = []
-    for segment in
+    for segment in split_path_identifier(value):
         normalized = re.sub(r"[^0-9a-zA-Z]+", "_", segment).strip("_").upper()
         if not normalized:
             normalized = "VALUE"
@@ -933,7 +879,9 @@ def _build_input_payload(
     str or list[dict[str, Any]]
         Input payload suitable for the Agents SDK.
     """
-    normalized_file_ids = [
+    normalized_file_ids = [
+        file_id for file_id in dict.fromkeys(ensure_list(file_ids)) if file_id
+    ]
     if not normalized_file_ids:
         return text
     attachments = [
@@ -1038,17 +986,20 @@ def _selected_nodes(step: ClassificationStep) -> list[str]:
     list[str]
         Selected identifiers in priority order.
     """
-
-
-
-
-
-
-
-
-
-
-
+    enum_cls: type[Enum] | None = None
+    step_cls = step.__class__
+    if hasattr(step_cls, "model_fields"):
+        field = step_cls.model_fields.get("selected_nodes")
+        if field is not None:
+            enum_cls = step_cls._extract_enum_class(field.annotation)
+    if enum_cls is None:
+        enum_cls = Enum
+    selected_nodes = [
+        str(_normalize_enum_value(selected_node, enum_cls))
+        for selected_node in step.selected_nodes or []
+        if selected_node
+    ]
+    return selected_nodes
 
 
 def _max_confidence(
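Taken together, the classifier.py changes drop the `single_class` flag and the single `final_node` result field in favor of recorded `steps` plus `final_nodes`, and thread a new `model_settings` option through to sub-agents. A minimal usage sketch of the 0.6.5 surface, assuming `TaxonomyNode` accepts `label`, `description`, and `children` keyword arguments and that `"gpt-4.1-mini"` stands in for any valid model id (neither detail is shown in this diff):

```python
from agents.model_settings import ModelSettings

from openai_sdk_helpers.agent.classifier import TaxonomyClassifierAgent
from openai_sdk_helpers.structure import TaxonomyNode

# Hypothetical taxonomy; the TaxonomyNode constructor arguments are assumed.
taxonomy = TaxonomyNode(
    label="Hardware",
    description="Physical devices",
    children=[
        TaxonomyNode(label="Laptops", description="Portable computers"),
        TaxonomyNode(label="Phones", description="Mobile handsets"),
    ],
)

agent = TaxonomyClassifierAgent(
    taxonomy=taxonomy,
    model="gpt-4.1-mini",                           # placeholder model id
    model_settings=ModelSettings(temperature=0.0),  # new in 0.6.5
)

# single_class is gone in 0.6.5; every accepted selection is kept instead.
result = agent.run_sync("The screen on my laptop is cracked.")
print(result.final_nodes)   # matched taxonomy nodes, or None
print(result.confidence)    # best confidence seen during traversal
print(result.stop_reason)   # ClassificationStopReason value
print(result.steps)         # per-level ClassificationStep records (new)
```

Callers that relied on `single_class=True` now see every accepted selection; since selections are recorded in priority order, the first entry of `result.final_nodes` is the closest equivalent.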
{openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/prompt/classifier.jinja
RENAMED
@@ -3,7 +3,6 @@ You are a taxonomy classification assistant.
 Instructions:
 - Review the text and select all matching taxonomy nodes from the list.
 - Populate selected_nodes as a list of taxonomy node ids for multi-class matches.
-- Use selected_node when a single best match is appropriate.
 - Provide a confidence score between 0 and 1 for the selections; higher means more certain.
 - Interpret confidence as:
   - 0.90–1.00: explicit lexical match.
@@ -30,10 +29,10 @@ Instructions:
 
 Current depth: {{ depth }}
 
-Previous
-{% if
-{% for step in
-- {{ step.
+Previous steps:
+{% if steps %}
+{% for step in steps %}
+- {{ step.selected_nodes | map('string') | join(', ') }} (confidence={{ step.confidence }}, stop_reason={{ step.stop_reason }})
 {% endfor %}
 {% else %}
 - None
@@ -43,5 +42,5 @@ Candidate taxonomy nodes:
 {% for node in taxonomy_nodes %}
 - identifier: {{ node.identifier }}
   label: {{ node.label }}
-  description: {{ node.
+  description: {{ node.computed_description }}
 {% endfor %}
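The template now loops over the summarized `steps` supplied by `_build_context` instead of the removed single-selection fields, and reads node descriptions from `computed_description`. A small rendering sketch of just the "Previous steps" fragment, using jinja2 directly and an invented `FakeStep` object standing in for whatever `ClassificationStep.as_summary()` returns:

```python
from jinja2 import Template

# Only the "Previous steps" fragment from classifier.jinja (0.6.5).
fragment = Template(
    "Previous steps:\n"
    "{% if steps %}"
    "{% for step in steps %}"
    "- {{ step.selected_nodes | map('string') | join(', ') }} "
    "(confidence={{ step.confidence }}, stop_reason={{ step.stop_reason }})\n"
    "{% endfor %}"
    "{% else %}- None\n{% endif %}"
)


class FakeStep:
    # Invented stand-in: the template only needs these three attributes.
    selected_nodes = ["Hardware > Laptops"]
    confidence = 0.82
    stop_reason = "max_depth"


print(fragment.render(steps=[FakeStep()]))
# Previous steps:
# - Hardware > Laptops (confidence=0.82, stop_reason=max_depth)
```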
{openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/__init__.py
RENAMED
@@ -82,7 +82,8 @@ from .classification import (
     ClassificationStopReason,
     Taxonomy,
     TaxonomyNode,
-
+    format_path_identifier,
+    split_path_identifier,
     taxonomy_enum_path,
 )
 from .extraction import (
@@ -112,7 +113,8 @@ __all__ = [
     "ClassificationStopReason",
     "Taxonomy",
     "TaxonomyNode",
-    "
+    "format_path_identifier",
+    "split_path_identifier",
     "taxonomy_enum_path",
     "TaskStructure",
     "PlanStructure",
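The path helpers that were private to agent/classifier.py are now exported from `openai_sdk_helpers.structure`. A short round-trip sketch, assuming the public `format_path_identifier`/`split_path_identifier` keep the `" > "` delimiter and `"\>"` escaping of the removed private versions (the new implementations live in structure/classification.py, which is not shown in this section):

```python
from openai_sdk_helpers.structure import (
    format_path_identifier,
    split_path_identifier,
)

# Last label deliberately contains the delimiter to exercise the escaping.
segments = ["Hardware", "Laptops", "Screens > Panels"]

identifier = format_path_identifier(segments)
print(identifier)  # segments joined with " > ", embedded delimiters escaped

# Expected round-trip under the old escaping rules.
assert split_path_identifier(identifier) == segments
```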
{openai_sdk_helpers-0.6.2 → openai_sdk_helpers-0.6.5}/src/openai_sdk_helpers/structure/base.py
RENAMED
@@ -174,6 +174,100 @@ def _ensure_schema_has_type(schema: dict[str, Any]) -> None:
     schema.update(_build_any_value_schema())
 
 
+def _strip_ref_types(
+    schema: dict[str, Any],
+    *,
+    nullable_fields: set[str] | None = None,
+) -> None:
+    """Remove type entries from enum $ref nodes when non-nullable.
+
+    Parameters
+    ----------
+    schema : dict[str, Any]
+        Root schema to clean in place.
+    nullable_fields : set[str] | None, optional
+        Field names that should remain nullable. Defaults to None.
+    """
+    field_names = nullable_fields or set()
+
+    def _resolve_ref(ref: str) -> dict[str, Any] | None:
+        if not ref.startswith("#/"):
+            return None
+        pointer = ref.removeprefix("#/")
+        current: Any = schema
+        for part in pointer.split("/"):
+            if not isinstance(current, dict):
+                return None
+            current = current.get(part)
+        if isinstance(current, dict):
+            return current
+        return None
+
+    def _is_enum_ref(ref: str) -> bool:
+        ref_target = _resolve_ref(ref)
+        if not isinstance(ref_target, dict):
+            return False
+        return "enum" in ref_target
+
+    def _is_null_schema(entry: Any) -> bool:
+        if not isinstance(entry, dict):
+            return False
+        entry_type = entry.get("type")
+        if entry_type == "null":
+            return True
+        if isinstance(entry_type, list) and "null" in entry_type:
+            return True
+        return False
+
+    def _walk(
+        node: Any,
+        *,
+        nullable_context: bool = False,
+        nullable_property: bool = False,
+    ) -> None:
+        if isinstance(node, dict):
+            ref = node.get("$ref")
+            if (
+                isinstance(ref, str)
+                and _is_enum_ref(ref)
+                and not nullable_context
+                and not nullable_property
+            ):
+                node.pop("type", None)
+            for key, value in node.items():
+                if key == "anyOf" and isinstance(value, list):
+                    anyof_nullable = any(_is_null_schema(entry) for entry in value)
+                    for entry in value:
+                        _walk(
+                            entry,
+                            nullable_context=anyof_nullable or nullable_context,
+                            nullable_property=nullable_property,
+                        )
+                    continue
+                if key == "properties" and isinstance(value, dict):
+                    for prop_name, prop_schema in value.items():
+                        _walk(
+                            prop_schema,
+                            nullable_context=nullable_context,
+                            nullable_property=prop_name in field_names,
+                        )
+                    continue
+                _walk(
+                    value,
+                    nullable_context=nullable_context,
+                    nullable_property=nullable_property,
+                )
+        elif isinstance(node, list):
+            for item in node:
+                _walk(
+                    item,
+                    nullable_context=nullable_context,
+                    nullable_property=nullable_property,
+                )
+
+    _walk(schema)
+
+
 def _hydrate_ref_types(schema: dict[str, Any]) -> None:
     """Attach explicit types to $ref nodes when available.
 
@@ -556,17 +650,18 @@ class StructureBase(BaseModelJSONSerializable):
 
         cleaned_schema = cast(dict[str, Any], clean_refs(schema))
 
-        cleaned_schema = cast(dict[str, Any], cleaned_schema)
-        _hydrate_ref_types(cleaned_schema)
-        _ensure_items_have_schema(cleaned_schema)
-        _ensure_schema_has_type(cleaned_schema)
-
         nullable_fields = {
             name
             for name, model_field in getattr(cls, "model_fields", {}).items()
             if getattr(model_field, "default", inspect.Signature.empty) is None
         }
 
+        cleaned_schema = cast(dict[str, Any], cleaned_schema)
+        _hydrate_ref_types(cleaned_schema)
+        _ensure_items_have_schema(cleaned_schema)
+        _ensure_schema_has_type(cleaned_schema)
+        _strip_ref_types(cleaned_schema, nullable_fields=nullable_fields)
+
         properties = cleaned_schema.get("properties", {})
         if isinstance(properties, dict) and nullable_fields:
             for field_name in nullable_fields: