openai-sdk-helpers 0.6.1__py3-none-any.whl → 0.6.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_sdk_helpers/agent/__init__.py +2 -0
- openai_sdk_helpers/agent/base.py +75 -7
- openai_sdk_helpers/agent/classifier.py +284 -102
- openai_sdk_helpers/agent/configuration.py +42 -0
- openai_sdk_helpers/agent/files.py +120 -0
- openai_sdk_helpers/agent/runner.py +9 -9
- openai_sdk_helpers/agent/translator.py +2 -2
- openai_sdk_helpers/files_api.py +46 -1
- openai_sdk_helpers/prompt/classifier.jinja +25 -10
- openai_sdk_helpers/structure/__init__.py +8 -2
- openai_sdk_helpers/structure/classification.py +240 -85
- {openai_sdk_helpers-0.6.1.dist-info → openai_sdk_helpers-0.6.4.dist-info}/METADATA +1 -1
- {openai_sdk_helpers-0.6.1.dist-info → openai_sdk_helpers-0.6.4.dist-info}/RECORD +16 -15
- {openai_sdk_helpers-0.6.1.dist-info → openai_sdk_helpers-0.6.4.dist-info}/WHEEL +0 -0
- {openai_sdk_helpers-0.6.1.dist-info → openai_sdk_helpers-0.6.4.dist-info}/entry_points.txt +0 -0
- {openai_sdk_helpers-0.6.1.dist-info → openai_sdk_helpers-0.6.4.dist-info}/licenses/LICENSE +0 -0
openai_sdk_helpers/agent/configuration.py
CHANGED

@@ -13,6 +13,7 @@ from ..utils.json.data_class import DataclassJSONSerializable
 from ..utils.registry import RegistryBase
 from ..utils.instructions import resolve_instructions_from_path
 from ..structure.base import StructureBase
+from ..settings import OpenAISettings


 class AgentRegistry(RegistryBase["AgentConfiguration"]):

@@ -152,6 +153,8 @@ class AgentConfiguration(DataclassJSONSerializable):
         Resolve the prompt template path for this configuration.
     gen_agent(run_context_wrapper)
         Create a AgentBase instance from this configuration.
+    to_openai_settings(dotenv_path=None, **overrides)
+        Build OpenAISettings using this configuration as defaults.
     replace(**changes)
         Create a new AgentConfiguration with specified fields replaced.
     to_json()

@@ -272,6 +275,45 @@ class AgentConfiguration(DataclassJSONSerializable):
         """Resolve instructions from string or file path."""
         return resolve_instructions_from_path(self.instructions)

+    def to_openai_settings(
+        self, *, dotenv_path: Path | None = None, **overrides: Any
+    ) -> OpenAISettings:
+        """Build OpenAI settings using this configuration as defaults.
+
+        Parameters
+        ----------
+        dotenv_path : Path or None, optional
+            Optional dotenv file path for loading environment variables.
+        overrides : Any
+            Keyword overrides applied on top of environment values. Use this
+            to supply API credentials and override defaults.
+
+        Returns
+        -------
+        OpenAISettings
+            OpenAI settings instance with defaults derived from this
+            configuration.
+
+        Raises
+        ------
+        ValueError
+            If no API key is supplied via overrides or environment variables.
+
+        Examples
+        --------
+        >>> configuration = AgentConfiguration(
+        ...     name="summarizer",
+        ...     instructions="Summarize text",
+        ...     model="gpt-4o-mini",
+        ... )
+        >>> settings = configuration.to_openai_settings(api_key="sk-...")
+        >>> # Or rely on environment variables like OPENAI_API_KEY
+        >>> settings = configuration.to_openai_settings()
+        """
+        if self.model and "default_model" not in overrides:
+            overrides["default_model"] = self.model
+        return OpenAISettings.from_env(dotenv_path=dotenv_path, **overrides)
+
     def resolve_prompt_path(self, prompt_dir: Path | None = None) -> Path | None:
         """Resolve the prompt template path for this configuration.

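The new method threads the configuration's model into environment-backed settings. A minimal usage sketch, not taken verbatim from the package (assumes OPENAI_API_KEY is set; the import path is inferred from the file list above, and create_client() is the settings method referenced in the new files helper below):

# Sketch only: derive OpenAISettings from an AgentConfiguration; assumes OPENAI_API_KEY is set.
from openai_sdk_helpers.agent.configuration import AgentConfiguration  # path inferred from the file list

configuration = AgentConfiguration(
    name="summarizer",
    instructions="Summarize text",
    model="gpt-4o-mini",
)

# The configuration's model becomes default_model unless an explicit override is passed.
settings = configuration.to_openai_settings()
client = settings.create_client()  # create_client() is referenced in the files helper docstring below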
openai_sdk_helpers/agent/files.py
ADDED

@@ -0,0 +1,120 @@
+"""File attachment helpers for the Agents SDK."""
+
+from __future__ import annotations
+
+from typing import Any, Literal
+
+from ..files_api import FilePurpose, FilesAPIManager
+from ..settings import OpenAISettings
+from ..utils import create_image_data_url, ensure_list, is_image_file
+
+
+def build_agent_input_messages(
+    content: str | list[str],
+    files: str | list[str] | None = None,
+    *,
+    files_manager: FilesAPIManager | None = None,
+    openai_settings: OpenAISettings | None = None,
+    file_purpose: FilePurpose = "user_data",
+    image_detail: Literal["low", "high", "auto"] = "auto",
+) -> list[dict[str, Any]]:
+    """Build Agents SDK input messages with file attachments.
+
+    Parameters
+    ----------
+    content : str or list[str]
+        Prompt text or list of prompt texts to send.
+    files : str, list[str], or None, default None
+        Optional file path or list of file paths. Image files are sent as
+        base64-encoded ``input_image`` entries. Document files are uploaded
+        using ``files_manager`` and sent as ``input_file`` entries.
+    files_manager : FilesAPIManager or None, default None
+        File upload helper used to create file IDs for document uploads.
+        Required when ``files`` contains non-image documents.
+    openai_settings : OpenAISettings or None, default None
+        Optional OpenAI settings used to build a FilesAPIManager when one is
+        not provided. When supplied, ``openai_settings.create_client()`` is
+        used to initialize the Files API manager.
+    file_purpose : FilePurpose, default "user_data"
+        Purpose passed to the Files API when uploading document files.
+    image_detail : {"low", "high", "auto"}, default "auto"
+        Detail hint passed along with base64-encoded image inputs.
+
+    Returns
+    -------
+    list[dict[str, Any]]
+        Agents SDK input messages that include text and optional file entries.
+
+    Raises
+    ------
+    ValueError
+        If document files are provided without a ``files_manager``.
+
+    Examples
+    --------
+    >>> from openai import OpenAI
+    >>> from openai_sdk_helpers.files_api import FilesAPIManager
+    >>> from openai_sdk_helpers.agent.files import build_agent_input_messages
+    >>> client = OpenAI()
+    >>> files_manager = FilesAPIManager(client)
+    >>> messages = build_agent_input_messages(
+    ...     "Summarize this document",
+    ...     files="report.pdf",
+    ...     files_manager=files_manager,
+    ... )
+    """
+    contents = ensure_list(content)
+    all_files = ensure_list(files)
+
+    image_files: list[str] = []
+    document_files: list[str] = []
+    for file_path in all_files:
+        if is_image_file(file_path):
+            image_files.append(file_path)
+        else:
+            document_files.append(file_path)
+
+    attachments: list[dict[str, Any]] = []
+
+    if document_files:
+        if files_manager is None and openai_settings is not None:
+            files_manager = FilesAPIManager(openai_settings.create_client())
+        if files_manager is None:
+            raise ValueError(
+                "files_manager is required to upload document files for agent input."
+            )
+        expires_after = 86400 if file_purpose == "user_data" else None
+        if hasattr(files_manager, "batch_upload"):
+            uploaded_files = files_manager.batch_upload(
+                document_files,
+                purpose=file_purpose,
+                expires_after=expires_after,
+            )
+        else:
+            uploaded_files = [
+                files_manager.create(
+                    file_path, purpose=file_purpose, expires_after=expires_after
+                )
+                for file_path in document_files
+            ]
+        for uploaded_file in uploaded_files:
+            attachments.append({"type": "input_file", "file_id": uploaded_file.id})
+
+    for image_path in image_files:
+        image_url, detail = create_image_data_url(image_path, detail=image_detail)
+        attachments.append(
+            {"type": "input_image", "image_url": image_url, "detail": detail}
+        )
+
+    messages: list[dict[str, Any]] = []
+    for index, raw_content in enumerate(contents):
+        text = raw_content.strip()
+        content_items: list[dict[str, Any]] = [{"type": "input_text", "text": text}]
+        if index == 0:
+            content_items.extend(attachments)
+        messages.append({"role": "user", "content": content_items})
+
+    return messages
+
+
+__all__ = ["build_agent_input_messages"]

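A short sketch of calling the new helper with mixed attachments (file names are hypothetical; the OpenAI client is assumed to pick up credentials from the environment):

# Sketch: mixed image and document input; file names are hypothetical.
from openai import OpenAI

from openai_sdk_helpers.agent.files import build_agent_input_messages
from openai_sdk_helpers.files_api import FilesAPIManager

files_manager = FilesAPIManager(OpenAI())
messages = build_agent_input_messages(
    "Compare the chart with the report",
    files=["chart.png", "report.pdf"],
    files_manager=files_manager,
    image_detail="high",
)
# messages[0]["content"] starts with the input_text entry, followed by an
# input_file entry for the uploaded PDF and a base64 input_image entry.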
openai_sdk_helpers/agent/runner.py
CHANGED

@@ -7,7 +7,7 @@ signatures whether they need asynchronous or synchronous results.

 from __future__ import annotations

-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, cast

 from agents import Agent, RunResult, Runner, Session

@@ -17,7 +17,7 @@ from ..structure.base import StructureBase

 async def run_async(
     agent: Agent,
-    input: str,
+    input: str | list[dict[str, Any]],
     *,
     context: Optional[Dict[str, Any]] = None,
     output_structure: Optional[type[StructureBase]] = None,
@@ -29,8 +29,8 @@ async def run_async(
     ----------
     agent : Agent
         Configured agent instance to execute.
-    input : str
-        Prompt or
+    input : str or list[dict[str, Any]]
+        Prompt text or structured input for the agent.
     context : dict or None, default=None
         Optional context dictionary passed to the agent.
     output_structure : type[StructureBase] or None, default=None
@@ -53,7 +53,7 @@ async def run_async(
     ...     return result
     >>> asyncio.run(example())  # doctest: +SKIP
     """
-    result = await Runner.run(agent, input, context=context, session=session)
+    result = await Runner.run(agent, cast(Any, input), context=context, session=session)
     if output_structure is not None:
         return result.final_output_as(output_structure)
     return result
@@ -61,7 +61,7 @@ async def run_async(

 def run_sync(
     agent: Agent,
-    input: str,
+    input: str | list[dict[str, Any]],
     *,
     context: Optional[Dict[str, Any]] = None,
     output_structure: Optional[type[StructureBase]] = None,
@@ -77,8 +77,8 @@ def run_sync(
     ----------
     agent : Agent
         Configured agent instance to execute.
-    input : str
-        Prompt or
+    input : str or list[dict[str, Any]]
+        Prompt text or structured input for the agent.
     context : dict or None, default=None
         Optional context dictionary passed to the agent.
     output_structure : type[StructureBase] or None, default=None
@@ -102,7 +102,7 @@ def run_sync(
     >>> agent = Agent(name="test", instructions="test", model="gpt-4o-mini")
     >>> result = run_sync(agent, "What is 2+2?")  # doctest: +SKIP
     """
-    coro = Runner.run(agent, input, context=context, session=session)
+    coro = Runner.run(agent, cast(Any, input), context=context, session=session)
     result: RunResult = run_coroutine_with_fallback(coro)
     if output_structure is not None:
         return result.final_output_as(output_structure)
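With the widened input type, the message list produced by build_agent_input_messages can be passed straight to the runner helpers. A hedged end-to-end sketch (assumes OPENAI_API_KEY is configured and report.pdf exists; import paths follow the file layout above):

# Illustrative sketch combining the new helpers; not taken verbatim from the package.
from agents import Agent
from openai import OpenAI

from openai_sdk_helpers.agent.files import build_agent_input_messages
from openai_sdk_helpers.agent.runner import run_sync
from openai_sdk_helpers.files_api import FilesAPIManager

agent = Agent(name="summarizer", instructions="Summarize the attached file.", model="gpt-4o-mini")
messages = build_agent_input_messages(
    "Summarize this document",
    files="report.pdf",
    files_manager=FilesAPIManager(OpenAI()),
)
result = run_sync(agent, messages)  # input may now be a str or a list of message dicts
print(result.final_output)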
openai_sdk_helpers/agent/translator.py
CHANGED

@@ -138,7 +138,7 @@ class TranslatorAgent(AgentBase):

     def run_sync(
         self,
-        input: str,
+        input: str | list[dict[str, Any]],
         *,
         context: Optional[Dict[str, Any]] = None,
         output_structure: Optional[type[StructureBase]] = None,
@@ -149,7 +149,7 @@ class TranslatorAgent(AgentBase):

         Parameters
         ----------
-        input : str
+        input : str or list[dict[str, Any]]
             Source content to translate.
         context : dict or None, default=None
             Additional context values to merge into the prompt.

openai_sdk_helpers/files_api.py
CHANGED
@@ -12,7 +12,7 @@ from __future__ import annotations

 import logging
 from pathlib import Path
-from typing import Any, BinaryIO, Literal, cast
+from typing import Any, BinaryIO, Literal, Sequence, cast

 from openai import OpenAI, NOT_GIVEN
 from openai.types import FileDeleted, FileObject
@@ -62,6 +62,8 @@ class FilesAPIManager:
         Delete a specific file.
     retrieve_content(file_id)
         Download file content.
+    batch_upload(files, purpose, track, expires_after)
+        Upload multiple files to the Files API.
     cleanup()
         Delete all tracked files.

@@ -350,6 +352,49 @@ class FilesAPIManager:
         """
         return self._client.files.content(file_id).read()

+    def batch_upload(
+        self,
+        files: Sequence[BinaryIO | Path | str],
+        purpose: FilePurpose,
+        track: bool | None = None,
+        expires_after: int | None = None,
+    ) -> list[FileObject]:
+        """Upload multiple files to the OpenAI Files API.
+
+        Parameters
+        ----------
+        files : Sequence[BinaryIO | Path | str]
+            File-like objects or file paths to upload.
+        purpose : FilePurpose
+            The intended purpose of the uploaded files.
+        track : bool or None, default None
+            Override auto_track for these uploads. If None, uses instance setting.
+        expires_after : int or None, default None
+            Number of seconds after which files expire. See ``create`` for details.
+
+        Returns
+        -------
+        list[FileObject]
+            Uploaded file objects in the same order as ``files``.
+
+        Examples
+        --------
+        >>> files = ["doc1.pdf", "doc2.pdf"]
+        >>> uploaded = manager.batch_upload(files, purpose="user_data")
+        >>> [file.id for file in uploaded]
+        """
+        if not files:
+            return []
+        return [
+            self.create(
+                file_path,
+                purpose=purpose,
+                track=track,
+                expires_after=expires_after,
+            )
+            for file_path in files
+        ]
+
     def cleanup(self) -> dict[str, bool]:
         """Delete all tracked files.

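A minimal sketch of the batch helper (file names are hypothetical; the OpenAI client is assumed to read its API key from the environment):

# Sketch: upload several documents in one call; file names are hypothetical.
from openai import OpenAI

from openai_sdk_helpers.files_api import FilesAPIManager

manager = FilesAPIManager(OpenAI())
uploaded = manager.batch_upload(
    ["doc1.pdf", "doc2.pdf"],
    purpose="user_data",
    expires_after=86400,  # one day, matching the default used by the agent files helper
)
file_ids = [file.id for file in uploaded]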
openai_sdk_helpers/prompt/classifier.jinja
CHANGED

@@ -3,21 +3,36 @@ You are a taxonomy classification assistant.
 Instructions:
 - Review the text and select all matching taxonomy nodes from the list.
 - Populate selected_nodes as a list of taxonomy node ids for multi-class matches.
-- Use selected_node when a single best match is appropriate.
 - Provide a confidence score between 0 and 1 for the selections; higher means more certain.
+- Interpret confidence as:
+  - 0.90–1.00: explicit lexical match.
+  - 0.70–0.89: strong semantic alignment.
+  - 0.40–0.69: weak or ambiguous alignment.
+  - <0.40: low-confidence inference.
 - Use only taxonomy identifiers from the candidate list for any selections.
 - Use the stop_reason enum values only: "continue", "stop", "no_match", "max_depth", "no_children".
--
--
--
--
+- Stop reason semantics:
+  - continue: valid match exists and deeper traversal is required.
+  - stop: low confidence, terminate to avoid false precision.
+  - no_match: no semantic fit in candidates.
+  - max_depth: taxonomy depth limit reached.
+  - no_children: matched node has no children.
+- Decision mapping:
+  - High or medium confidence with children available: continue.
+  - High confidence with terminal node: no_children.
+  - Low confidence match: stop.
+  - No semantic alignment: no_match.
+  - Depth limit reached: max_depth.
+- Provide a concise rationale in one sentence.
+- Keep rationale evidence-based and avoid restating taxonomy labels.
+- Avoid verbosity, speculation, stylistic language, narrative explanation, redundancy, or creativity.

 Current depth: {{ depth }}

-Previous
-{% if
-{% for step in
-- {{ step.
+Previous steps:
+{% if steps %}
+{% for step in steps %}
+- {{ step.selected_nodes | map('string') | join(', ') }} (confidence={{ step.confidence }}, stop_reason={{ step.stop_reason }})
 {% endfor %}
 {% else %}
 - None
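The decision mapping added to the prompt reads like a small lookup. A purely illustrative Python sketch of that logic (this is not code from the package; thresholds follow the confidence bands above):

# Illustrative only: approximates the prompt's decision mapping, not package code.
def choose_stop_reason(confidence: float, matched: bool, has_children: bool, at_max_depth: bool) -> str:
    if at_max_depth:
        return "max_depth"
    if not matched:
        return "no_match"
    if confidence < 0.40:
        return "stop"  # low-confidence match: terminate to avoid false precision
    return "continue" if has_children else "no_children"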
@@ -27,5 +42,5 @@ Candidate taxonomy nodes:
 {% for node in taxonomy_nodes %}
 - identifier: {{ node.identifier }}
   label: {{ node.label }}
-  description: {{ node.
+  description: {{ node.computed_description }}
 {% endfor %}
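The candidate block now renders node.computed_description. A small jinja2 rendering sketch with hypothetical node data shows the resulting text:

# Illustrative jinja2 rendering of the candidate block; node data is hypothetical.
from jinja2 import Template

fragment = Template(
    "{% for node in taxonomy_nodes %}"
    "- identifier: {{ node.identifier }}\n"
    "  label: {{ node.label }}\n"
    "  description: {{ node.computed_description }}\n"
    "{% endfor %}"
)
print(fragment.render(taxonomy_nodes=[{
    "identifier": "finance.banking",
    "label": "Banking",
    "computed_description": "Retail and commercial banking topics.",
}]))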
openai_sdk_helpers/structure/__init__.py
CHANGED

@@ -80,8 +80,11 @@ from .classification import (
     ClassificationResult,
     ClassificationStep,
     ClassificationStopReason,
+    Taxonomy,
     TaxonomyNode,
-
+    format_path_identifier,
+    split_path_identifier,
+    taxonomy_enum_path,
 )
 from .extraction import (
     AnnotatedDocumentStructure,
@@ -108,8 +111,11 @@ __all__ = [
     "ClassificationResult",
     "ClassificationStep",
     "ClassificationStopReason",
+    "Taxonomy",
     "TaxonomyNode",
-    "
+    "format_path_identifier",
+    "split_path_identifier",
+    "taxonomy_enum_path",
     "TaskStructure",
     "PlanStructure",
     "create_plan",