openai-sdk-helpers 0.6.1__tar.gz → 0.6.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/PKG-INFO +1 -1
  2. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/pyproject.toml +1 -1
  3. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/__init__.py +2 -0
  4. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/base.py +75 -7
  5. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/classifier.py +244 -13
  6. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/configuration.py +42 -0
  7. openai_sdk_helpers-0.6.2/src/openai_sdk_helpers/agent/files.py +120 -0
  8. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/runner.py +9 -9
  9. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/translator.py +2 -2
  10. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/files_api.py +46 -1
  11. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/classifier.jinja +20 -4
  12. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/__init__.py +4 -0
  13. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/classification.py +74 -0
  14. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/.gitignore +0 -0
  15. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/LICENSE +0 -0
  16. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/README.md +0 -0
  17. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/__init__.py +0 -0
  18. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/coordinator.py +0 -0
  19. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/search/__init__.py +0 -0
  20. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/search/base.py +0 -0
  21. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/search/vector.py +0 -0
  22. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/search/web.py +0 -0
  23. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/summarizer.py +0 -0
  24. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/utils.py +0 -0
  25. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/validator.py +0 -0
  26. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/cli.py +0 -0
  27. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/enums/__init__.py +0 -0
  28. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/enums/base.py +0 -0
  29. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/environment.py +0 -0
  30. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/errors.py +0 -0
  31. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/extract/__init__.py +0 -0
  32. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/extract/extractor.py +0 -0
  33. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/extract/generator.py +0 -0
  34. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/logging.py +0 -0
  35. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/__init__.py +0 -0
  36. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/base.py +0 -0
  37. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/extractor_config_agent_instructions.jinja +0 -0
  38. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/extractor_config_generator.jinja +0 -0
  39. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/extractor_config_generator_instructions.jinja +0 -0
  40. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/extractor_prompt_optimizer_agent_instructions.jinja +0 -0
  41. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/extractor_prompt_optimizer_request.jinja +0 -0
  42. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/summarizer.jinja +0 -0
  43. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/translator.jinja +0 -0
  44. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/validator.jinja +0 -0
  45. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/vector_planner.jinja +0 -0
  46. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/vector_search.jinja +0 -0
  47. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/prompt/vector_writer.jinja +0 -0
  48. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/py.typed +0 -0
  49. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/__init__.py +0 -0
  50. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/base.py +0 -0
  51. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/configuration.py +0 -0
  52. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/files.py +0 -0
  53. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/messages.py +0 -0
  54. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/planner.py +0 -0
  55. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/prompter.py +0 -0
  56. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/runner.py +0 -0
  57. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/tool_call.py +0 -0
  58. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/response/vector_store.py +0 -0
  59. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/settings.py +0 -0
  60. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/streamlit_app/__init__.py +0 -0
  61. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/streamlit_app/app.py +0 -0
  62. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/streamlit_app/configuration.py +0 -0
  63. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/agent_blueprint.py +0 -0
  64. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/base.py +0 -0
  65. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/extraction.py +0 -0
  66. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/plan/__init__.py +0 -0
  67. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/plan/enum.py +0 -0
  68. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/plan/helpers.py +0 -0
  69. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/plan/plan.py +0 -0
  70. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/plan/task.py +0 -0
  71. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/plan/types.py +0 -0
  72. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/prompt.py +0 -0
  73. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/responses.py +0 -0
  74. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/summary.py +0 -0
  75. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/translation.py +0 -0
  76. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/validation.py +0 -0
  77. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/vector_search.py +0 -0
  78. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/structure/web_search.py +0 -0
  79. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/tools.py +0 -0
  80. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/types.py +0 -0
  81. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/__init__.py +0 -0
  82. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/async_utils.py +0 -0
  83. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/coercion.py +0 -0
  84. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/encoding.py +0 -0
  85. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/instructions.py +0 -0
  86. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/json/__init__.py +0 -0
  87. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/json/base_model.py +0 -0
  88. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/json/data_class.py +0 -0
  89. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/json/ref.py +0 -0
  90. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/json/utils.py +0 -0
  91. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/langextract.py +0 -0
  92. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/output_validation.py +0 -0
  93. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/path_utils.py +0 -0
  94. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/registry.py +0 -0
  95. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/utils/validation.py +0 -0
  96. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/vector_storage/__init__.py +0 -0
  97. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/vector_storage/cleanup.py +0 -0
  98. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/vector_storage/storage.py +0 -0
  99. {openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/vector_storage/types.py +0 -0
{openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openai-sdk-helpers
- Version: 0.6.1
+ Version: 0.6.2
  Summary: Composable helpers for OpenAI SDK agents, prompts, and storage
  Author: openai-sdk-helpers maintainers
  License: MIT
{openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "openai-sdk-helpers"
- version = "0.6.1"
+ version = "0.6.2"
  requires-python = ">=3.10"
  readme = "README.md"
  description = "Composable helpers for OpenAI SDK agents, prompts, and storage"
{openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/__init__.py
@@ -14,6 +14,7 @@ from .validator import ValidatorAgent
  from .utils import run_coroutine_agent_sync
  from .search.vector import VectorAgentSearch
  from .search.web import WebAgentSearch
+ from .files import build_agent_input_messages

  __all__ = [
      "AgentBase",
@@ -34,4 +35,5 @@ __all__ = [
      "ValidatorAgent",
      "VectorAgentSearch",
      "WebAgentSearch",
+     "build_agent_input_messages",
  ]
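
Usage note: the newly exported helper can be called directly from `openai_sdk_helpers.agent`; a minimal sketch (the file name and detail hint are illustrative), using an image attachment so no Files API upload is needed:

# Sketch only: build Agents SDK input messages for a prompt plus a local image.
# Image files are base64-encoded as input_image entries, so no files_manager is required.
from openai_sdk_helpers.agent import build_agent_input_messages

messages = build_agent_input_messages(
    content="Describe the chart in this screenshot",
    files="screenshot.png",
    image_detail="low",
)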
{openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/base.py
@@ -6,7 +6,7 @@ import logging
  import traceback
  import uuid
  from pathlib import Path
- from typing import TYPE_CHECKING, Any, Dict, Optional, Protocol, cast
+ from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Protocol, cast

  from agents import Agent, Handoff, InputGuardrail, OutputGuardrail, Session
  from agents.model_settings import ModelSettings
@@ -33,6 +33,7 @@ from .runner import run_async, run_sync
  if TYPE_CHECKING:
      from ..settings import OpenAISettings
      from ..response.base import ResponseBase
+     from ..files_api import FilePurpose, FilesAPIManager


  class AgentConfigurationProtocol(Protocol):
@@ -184,6 +185,8 @@
          Return response tool handler and definition for Responses API use.
      build_response(openai_settings, data_path=None, tool_handlers=None, system_vector_store=None)
          Build a ResponseBase instance based on this agent.
+     build_input_messages(content, files=None, files_manager=None, file_purpose="user_data", image_detail="auto")
+         Build Agents SDK input messages with optional file attachments.
      save_error(exc)
          Persist error details to a file named with the agent UUID.
      close()
@@ -467,7 +470,7 @@

      async def run_async(
          self,
-         input: str,
+         input: str | list[dict[str, Any]],
          *,
          context: Optional[Dict[str, Any]] = None,
          output_structure: Optional[type[StructureBase]] = None,
@@ -477,8 +480,8 @@

          Parameters
          ----------
-         input : str
-             Prompt or query for the agent.
+         input : str or list[dict[str, Any]]
+             Prompt text or structured input for the agent.
          context : dict or None, default=None
              Optional dictionary passed to the agent.
          output_structure : type[StructureBase] or None, default=None
@@ -522,7 +525,7 @@

      def run_sync(
          self,
-         input: str,
+         input: str | list[dict[str, Any]],
          *,
          context: Optional[Dict[str, Any]] = None,
          output_structure: Optional[type[StructureBase]] = None,
@@ -532,8 +535,8 @@

          Parameters
          ----------
-         input : str
-             Prompt or query for the agent.
+         input : str or list[dict[str, Any]]
+             Prompt text or structured input for the agent.
          context : dict or None, default=None
              Optional dictionary passed to the agent.
          output_structure : type[StructureBase] or None, default=None
@@ -660,6 +663,71 @@
              openai_settings=openai_settings,
          )

+     @staticmethod
+     def build_input_messages(
+         content: str | list[str],
+         files: str | list[str] | None = None,
+         *,
+         files_manager: FilesAPIManager | None = None,
+         openai_settings: OpenAISettings | None = None,
+         file_purpose: FilePurpose = "user_data",
+         image_detail: Literal["low", "high", "auto"] = "auto",
+     ) -> list[dict[str, Any]]:
+         """Build Agents SDK input messages with file attachments.
+
+         Parameters
+         ----------
+         content : str or list[str]
+             Prompt text or list of prompt texts to send.
+         files : str, list[str], or None, default None
+             Optional file path or list of file paths. Image files are sent as
+             base64-encoded ``input_image`` entries. Document files are uploaded
+             using ``files_manager`` and sent as ``input_file`` entries.
+         files_manager : FilesAPIManager or None, default None
+             File upload helper used to create file IDs for document uploads.
+             Required when ``files`` contains non-image documents.
+         openai_settings : OpenAISettings or None, default None
+             Optional OpenAI settings used to build a FilesAPIManager when one is
+             not provided. When supplied, ``openai_settings.create_client()`` is
+             used to initialize the Files API manager.
+         file_purpose : FilePurpose, default "user_data"
+             Purpose passed to the Files API when uploading document files.
+         image_detail : {"low", "high", "auto"}, default "auto"
+             Detail hint passed along with base64-encoded image inputs.
+
+         Returns
+         -------
+         list[dict[str, Any]]
+             Agents SDK input messages that include text and optional file entries.
+
+         Raises
+         ------
+         ValueError
+             If document files are provided without a ``files_manager``.
+
+         Examples
+         --------
+         >>> from openai import OpenAI
+         >>> from openai_sdk_helpers.files_api import FilesAPIManager
+         >>> client = OpenAI()
+         >>> files_manager = FilesAPIManager(client)
+         >>> messages = AgentBase.build_input_messages(
+         ...     "Summarize this document",
+         ...     files="report.pdf",
+         ...     files_manager=files_manager,
+         ... )
+         """
+         from .files import build_agent_input_messages
+
+         return build_agent_input_messages(
+             content=content,
+             files=files,
+             files_manager=files_manager,
+             openai_settings=openai_settings,
+             file_purpose=file_purpose,
+             image_detail=image_detail,
+         )
+
      def _build_response_parameters(self) -> dict[str, Any]:
          """Build the Responses API parameter schema for this agent tool.
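
Usage note: besides the doctest above, the `openai_settings` path lets callers skip constructing a `FilesAPIManager` by hand; a sketch under the assumption that `OPENAI_API_KEY` is available in the environment (file names are illustrative):

# Sketch: derive the Files API manager from settings instead of passing one in.
from openai_sdk_helpers.agent import AgentBase
from openai_sdk_helpers.settings import OpenAISettings

settings = OpenAISettings.from_env()  # picks up OPENAI_API_KEY and friends
messages = AgentBase.build_input_messages(
    "Compare these two attachments",
    files=["report.pdf", "diagram.png"],  # documents -> input_file, images -> input_image
    openai_settings=settings,
    file_purpose="user_data",
)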
 
{openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/classifier.py
@@ -3,6 +3,7 @@
  from __future__ import annotations

  import asyncio
+ import threading
  import re
  from dataclasses import dataclass, field
  from enum import Enum
@@ -16,6 +17,7 @@ from ..structure import (
      StructureBase,
      TaxonomyNode,
  )
+ from ..utils import ensure_list
  from .base import AgentBase
  from .configuration import AgentConfiguration

@@ -32,8 +34,12 @@ class TaxonomyClassifierAgent(AgentBase):

      Methods
      -------
-     run_agent(text, taxonomy, context, max_depth)
+     run_agent(text, taxonomy, context, max_depth, session)
          Classify text by recursively walking the taxonomy tree.
+     run_async(input, context, max_depth, confidence_threshold, single_class)
+         Classify text asynchronously using taxonomy traversal.
+     run_sync(input, context, max_depth, confidence_threshold, single_class)
+         Classify text synchronously using taxonomy traversal.

      Examples
      --------
@@ -93,9 +99,11 @@
          text: str,
          *,
          context: Optional[Dict[str, Any]] = None,
+         file_ids: str | Sequence[str] | None = None,
          max_depth: Optional[int] = None,
          confidence_threshold: float | None = None,
          single_class: bool = False,
+         session: Optional[Any] = None,
      ) -> ClassificationResult:
          """Classify ``text`` by recursively walking taxonomy levels.

@@ -105,12 +113,16 @@
              Source text to classify.
          context : dict or None, default=None
              Additional context values to merge into the prompt.
+         file_ids : str or Sequence[str] or None, default=None
+             Optional file IDs to attach to each classification step.
          max_depth : int or None, default=None
              Maximum depth to traverse before stopping.
          confidence_threshold : float or None, default=None
              Minimum confidence required to accept a classification step.
          single_class : bool, default=False
              Whether to keep only the highest-priority selection per step.
+         session : Session or None, default=None
+             Optional session for maintaining conversation history across runs.

          Returns
          -------
@@ -125,15 +137,18 @@
          True
          """
          state = _TraversalState()
+         input_payload = _build_input_payload(text, file_ids)
          await self._classify_nodes(
-             text=text,
+             input_payload=input_payload,
              nodes=list(self._root_nodes),
              depth=0,
              parent_path=[],
              context=context,
+             file_ids=file_ids,
              max_depth=max_depth,
              confidence_threshold=confidence_threshold,
              single_class=single_class,
+             session=session,
              state=state,
          )

@@ -149,37 +164,210 @@
              path_nodes=state.path_nodes,
          )

+     async def run_async(
+         self,
+         input: str | list[dict[str, Any]],
+         *,
+         context: Optional[Dict[str, Any]] = None,
+         output_structure: Optional[type[StructureBase]] = None,
+         session: Optional[Any] = None,
+         file_ids: str | Sequence[str] | None = None,
+         max_depth: Optional[int] = None,
+         confidence_threshold: float | None = None,
+         single_class: bool = False,
+     ) -> ClassificationResult:
+         """Classify ``input`` asynchronously with taxonomy traversal.
+
+         Parameters
+         ----------
+         input : str or list[dict[str, Any]]
+             Source text to classify.
+         context : dict or None, default=None
+             Additional context values to merge into the prompt.
+         output_structure : type[StructureBase] or None, default=None
+             Unused in taxonomy traversal. Present for API compatibility.
+         session : Session or None, default=None
+             Optional session for maintaining conversation history across runs.
+         file_ids : str or Sequence[str] or None, default=None
+             Optional file IDs to attach to each classification step.
+         max_depth : int or None, default=None
+             Maximum depth to traverse before stopping.
+         confidence_threshold : float or None, default=None
+             Minimum confidence required to accept a classification step.
+         single_class : bool, default=False
+             Whether to keep only the highest-priority selection per step.
+
+         Returns
+         -------
+         ClassificationResult
+             Structured classification result describing the traversal.
+         """
+         _ = output_structure
+         if not isinstance(input, str):
+             msg = "TaxonomyClassifierAgent run_async requires text input."
+             raise TypeError(msg)
+         kwargs: Dict[str, Any] = {
+             "context": context,
+             "file_ids": file_ids,
+             "max_depth": max_depth,
+             "confidence_threshold": confidence_threshold,
+             "single_class": single_class,
+         }
+         if session is not None:
+             kwargs["session"] = session
+         return await self.run_agent(input, **kwargs)
+
+     def run_sync(
+         self,
+         input: str | list[dict[str, Any]],
+         *,
+         context: Optional[Dict[str, Any]] = None,
+         output_structure: Optional[type[StructureBase]] = None,
+         session: Optional[Any] = None,
+         file_ids: str | Sequence[str] | None = None,
+         max_depth: Optional[int] = None,
+         confidence_threshold: float | None = None,
+         single_class: bool = False,
+     ) -> ClassificationResult:
+         """Classify ``input`` synchronously with taxonomy traversal.
+
+         Parameters
+         ----------
+         input : str or list[dict[str, Any]]
+             Source text to classify.
+         context : dict or None, default=None
+             Additional context values to merge into the prompt.
+         output_structure : type[StructureBase] or None, default=None
+             Unused in taxonomy traversal. Present for API compatibility.
+         session : Session or None, default=None
+             Optional session for maintaining conversation history across runs.
+         file_ids : str or Sequence[str] or None, default=None
+             Optional file IDs to attach to each classification step.
+         max_depth : int or None, default=None
+             Maximum depth to traverse before stopping.
+         confidence_threshold : float or None, default=None
+             Minimum confidence required to accept a classification step.
+         single_class : bool, default=False
+             Whether to keep only the highest-priority selection per step.
+
+         Returns
+         -------
+         ClassificationResult
+             Structured classification result describing the traversal.
+         """
+         _ = output_structure
+         if not isinstance(input, str):
+             msg = "TaxonomyClassifierAgent run_sync requires text input."
+             raise TypeError(msg)
+         kwargs: Dict[str, Any] = {
+             "context": context,
+             "file_ids": file_ids,
+             "max_depth": max_depth,
+             "confidence_threshold": confidence_threshold,
+             "single_class": single_class,
+         }
+         if session is not None:
+             kwargs["session"] = session
+
+         async def runner() -> ClassificationResult:
+             return await self.run_agent(input, **kwargs)
+
+         try:
+             asyncio.get_running_loop()
+         except RuntimeError:
+             return asyncio.run(runner())
+
+         result: ClassificationResult | None = None
+         error: Exception | None = None
+
+         def _thread_func() -> None:
+             nonlocal error, result
+             try:
+                 result = asyncio.run(runner())
+             except Exception as exc:
+                 error = exc
+
+         thread = threading.Thread(target=_thread_func)
+         thread.start()
+         thread.join()
+
+         if error is not None:
+             raise error
+         if result is None:
+             msg = "Classification did not return a result"
+             raise RuntimeError(msg)
+         return result
+
+     async def _run_step_async(
+         self,
+         *,
+         input: str | list[dict[str, Any]],
+         context: Optional[Dict[str, Any]] = None,
+         output_structure: Optional[type[StructureBase]] = None,
+         session: Optional[Any] = None,
+     ) -> StructureBase:
+         """Execute a single classification step asynchronously.
+
+         Parameters
+         ----------
+         input : str or list[dict[str, Any]]
+             Prompt or structured input for the agent.
+         context : dict or None, default=None
+             Optional dictionary passed to the agent.
+         output_structure : type[StructureBase] or None, default=None
+             Optional type used to cast the final output.
+         session : Session or None, default=None
+             Optional session for maintaining conversation history across runs.
+
+         Returns
+         -------
+         StructureBase
+             Parsed result for the classification step.
+         """
+         return await super().run_async(
+             input=input,
+             context=context,
+             output_structure=output_structure,
+             session=session,
+         )
+
      async def _classify_nodes(
          self,
          *,
-         text: str,
+         input_payload: str | list[dict[str, Any]],
          nodes: list[TaxonomyNode],
          depth: int,
          parent_path: list[str],
          context: Optional[Dict[str, Any]],
+         file_ids: str | Sequence[str] | None,
          max_depth: Optional[int],
          confidence_threshold: float | None,
          single_class: bool,
+         session: Optional[Any],
          state: "_TraversalState",
      ) -> None:
          """Classify a taxonomy level and recursively traverse children.

          Parameters
          ----------
-         text : str
-             Source text to classify.
+         input_payload : str or list[dict[str, Any]]
+             Input payload used to prompt the agent.
          nodes : list[TaxonomyNode]
              Candidate taxonomy nodes for the current level.
          depth : int
              Current traversal depth.
          context : dict or None
              Additional context values to merge into the prompt.
+         file_ids : str or Sequence[str] or None
+             Optional file IDs attached to each classification step.
          max_depth : int or None
              Maximum traversal depth before stopping.
          confidence_threshold : float or None
              Minimum confidence required to accept a classification step.
          single_class : bool
              Whether to keep only the highest-priority selection per step.
+         session : Session or None
+             Optional session for maintaining conversation history across runs.
          state : _TraversalState
              Aggregated traversal state.
          """
@@ -197,10 +385,11 @@ class TaxonomyClassifierAgent(AgentBase):
              context=context,
          )
          step_structure = _build_step_structure(list(node_paths.keys()))
-         raw_step = await self.run_async(
-             input=text,
+         raw_step = await self._run_step_async(
+             input=input_payload,
              context=template_context,
              output_structure=step_structure,
+             session=session,
          )
          step = _normalize_step_output(raw_step, step_structure)
          state.path.append(step)
@@ -242,14 +431,16 @@
              (
                  self._classify_subtree(
                      sub_agent=sub_agent,
-                     text=text,
+                     input_payload=input_payload,
                      nodes=list(node.children),
                      depth=depth + 1,
                      parent_path=[*parent_path, node.label],
                      context=context,
+                     file_ids=file_ids,
                      max_depth=max_depth,
                      confidence_threshold=confidence_threshold,
                      single_class=single_class,
+                     session=session,
                      state=sub_state,
                  ),
                  base_final_nodes_len,
@@ -325,21 +516,23 @@
              model=self._model,
              taxonomy=list(nodes),
          )
-         sub_agent.run_async = self.run_async
+         sub_agent._run_step_async = self._run_step_async
          return sub_agent

      async def _classify_subtree(
          self,
          *,
          sub_agent: "TaxonomyClassifierAgent",
-         text: str,
+         input_payload: str | list[dict[str, Any]],
          nodes: list[TaxonomyNode],
          depth: int,
          parent_path: list[str],
          context: Optional[Dict[str, Any]],
+         file_ids: str | Sequence[str] | None,
          max_depth: Optional[int],
          confidence_threshold: float | None,
          single_class: bool,
+         session: Optional[Any],
          state: "_TraversalState",
      ) -> "_TraversalState":
          """Classify a taxonomy subtree and return the traversal state.
@@ -348,8 +541,8 @@
          ----------
          sub_agent : TaxonomyClassifierAgent
              Sub-agent configured for the subtree traversal.
-         text : str
-             Source text to classify.
+         input_payload : str or list[dict[str, Any]]
+             Input payload used to prompt the agent.
          nodes : list[TaxonomyNode]
              Candidate taxonomy nodes for the subtree.
          depth : int
@@ -358,12 +551,16 @@
              Path segments leading to the current subtree.
          context : dict or None
              Additional context values to merge into the prompt.
+         file_ids : str or Sequence[str] or None
+             Optional file IDs attached to each classification step.
          max_depth : int or None
              Maximum traversal depth before stopping.
          confidence_threshold : float or None
              Minimum confidence required to accept a classification step.
          single_class : bool
              Whether to keep only the highest-priority selection per step.
+         session : Session or None
+             Optional session for maintaining conversation history across runs.
          state : _TraversalState
              Traversal state to populate for the subtree.

@@ -373,14 +570,16 @@
              Populated traversal state for the subtree.
          """
          await sub_agent._classify_nodes(
-             text=text,
+             input_payload=input_payload,
              nodes=nodes,
              depth=depth,
              parent_path=parent_path,
              context=context,
+             file_ids=file_ids,
              max_depth=max_depth,
              confidence_threshold=confidence_threshold,
              single_class=single_class,
+             session=session,
              state=state,
          )
          return state
@@ -716,6 +915,38 @@ def _normalize_step_output(
      return ClassificationStep.from_json(payload)


+ def _build_input_payload(
+     text: str,
+     file_ids: str | Sequence[str] | None,
+ ) -> str | list[dict[str, Any]]:
+     """Build input payloads with optional file attachments.
+
+     Parameters
+     ----------
+     text : str
+         Prompt text to send to the agent.
+     file_ids : str or Sequence[str] or None
+         Optional file IDs to include as ``input_file`` attachments.
+
+     Returns
+     -------
+     str or list[dict[str, Any]]
+         Input payload suitable for the Agents SDK.
+     """
+     normalized_file_ids = [file_id for file_id in ensure_list(file_ids) if file_id]
+     if not normalized_file_ids:
+         return text
+     attachments = [
+         {"type": "input_file", "file_id": file_id} for file_id in normalized_file_ids
+     ]
+     return [
+         {
+             "role": "user",
+             "content": [{"type": "input_text", "text": text}, *attachments],
+         }
+     ]
+
+
  def _extract_enum_fields(
      step_structure: type[StructureBase],
  ) -> dict[str, type[Enum]]:
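
For reference, when file IDs are supplied the helper above wraps everything in a single user message that mixes `input_text` and `input_file` parts; the expected shape, with illustrative file IDs:

# Shape produced by _build_input_payload("Classify this filing", ["file-abc123", "file-def456"]):
payload = [
    {
        "role": "user",
        "content": [
            {"type": "input_text", "text": "Classify this filing"},
            {"type": "input_file", "file_id": "file-abc123"},
            {"type": "input_file", "file_id": "file-def456"},
        ],
    }
]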
{openai_sdk_helpers-0.6.1 → openai_sdk_helpers-0.6.2}/src/openai_sdk_helpers/agent/configuration.py
@@ -13,6 +13,7 @@ from ..utils.json.data_class import DataclassJSONSerializable
  from ..utils.registry import RegistryBase
  from ..utils.instructions import resolve_instructions_from_path
  from ..structure.base import StructureBase
+ from ..settings import OpenAISettings


  class AgentRegistry(RegistryBase["AgentConfiguration"]):
@@ -152,6 +153,8 @@
          Resolve the prompt template path for this configuration.
      gen_agent(run_context_wrapper)
          Create a AgentBase instance from this configuration.
+     to_openai_settings(dotenv_path=None, **overrides)
+         Build OpenAISettings using this configuration as defaults.
      replace(**changes)
          Create a new AgentConfiguration with specified fields replaced.
      to_json()
@@ -272,6 +275,45 @@
          """Resolve instructions from string or file path."""
          return resolve_instructions_from_path(self.instructions)

+     def to_openai_settings(
+         self, *, dotenv_path: Path | None = None, **overrides: Any
+     ) -> OpenAISettings:
+         """Build OpenAI settings using this configuration as defaults.
+
+         Parameters
+         ----------
+         dotenv_path : Path or None, optional
+             Optional dotenv file path for loading environment variables.
+         overrides : Any
+             Keyword overrides applied on top of environment values. Use this
+             to supply API credentials and override defaults.
+
+         Returns
+         -------
+         OpenAISettings
+             OpenAI settings instance with defaults derived from this
+             configuration.
+
+         Raises
+         ------
+         ValueError
+             If no API key is supplied via overrides or environment variables.
+
+         Examples
+         --------
+         >>> configuration = AgentConfiguration(
+         ...     name="summarizer",
+         ...     instructions="Summarize text",
+         ...     model="gpt-4o-mini",
+         ... )
+         >>> settings = configuration.to_openai_settings(api_key="sk-...")
+         >>> # Or rely on environment variables like OPENAI_API_KEY
+         >>> settings = configuration.to_openai_settings()
+         """
+         if self.model and "default_model" not in overrides:
+             overrides["default_model"] = self.model
+         return OpenAISettings.from_env(dotenv_path=dotenv_path, **overrides)
+
      def resolve_prompt_path(self, prompt_dir: Path | None = None) -> Path | None:
          """Resolve the prompt template path for this configuration.
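
Usage note: a short sketch of the new `to_openai_settings` helper, showing that the configuration's model seeds `default_model` unless the caller overrides it (the API key and model names are placeholders):

from openai_sdk_helpers.agent.configuration import AgentConfiguration

configuration = AgentConfiguration(
    name="classifier",
    instructions="Classify text against the taxonomy",
    model="gpt-4o-mini",
)

# default_model falls back to the configuration's model...
settings = configuration.to_openai_settings(api_key="sk-placeholder")

# ...unless it is overridden explicitly.
settings = configuration.to_openai_settings(api_key="sk-placeholder", default_model="gpt-4o")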