llmframe 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. llmframe/__init__.py +5 -0
  2. llmframe/adapters/__init__.py +1 -0
  3. llmframe/adapters/input/__init__.py +1 -0
  4. llmframe/adapters/output/__init__.py +5 -0
  5. llmframe/adapters/output/llm/__init__.py +55 -0
  6. llmframe/adapters/output/llm/llm_adapter/__init__.py +18 -0
  7. llmframe/adapters/output/llm/llm_adapter/adapter.py +301 -0
  8. llmframe/adapters/output/llm/llm_adapter/dto.py +26 -0
  9. llmframe/adapters/output/llm/llm_adapter/exceptions.py +25 -0
  10. llmframe/adapters/output/llm/llm_adapter/logging_utils.py +27 -0
  11. llmframe/adapters/output/llm/llm_adapter/protocols.py +14 -0
  12. llmframe/adapters/output/llm/llm_adapter/response_parser.py +38 -0
  13. llmframe/adapters/output/llm/openai_adapter/__init__.py +47 -0
  14. llmframe/adapters/output/llm/openai_adapter/client.py +41 -0
  15. llmframe/adapters/output/llm/openai_adapter/dto.py +32 -0
  16. llmframe/adapters/output/llm/openai_adapter/parsing/__init__.py +6 -0
  17. llmframe/adapters/output/llm/openai_adapter/parsing/message_content.py +80 -0
  18. llmframe/adapters/output/llm/openai_adapter/parsing/usage.py +56 -0
  19. llmframe/adapters/output/llm/openai_adapter/transport/__init__.py +35 -0
  20. llmframe/adapters/output/llm/openai_adapter/transport/adapter.py +477 -0
  21. llmframe/adapters/output/llm/openai_adapter/transport/payload_builders.py +175 -0
  22. llmframe/adapters/output/llm/openai_adapter/transport/protocols.py +117 -0
  23. llmframe/adapters/output/llm/usage_tracker/__init__.py +6 -0
  24. llmframe/adapters/output/llm/usage_tracker/adapter.py +305 -0
  25. llmframe/adapters/output/llm/usage_tracker/dto.py +36 -0
  26. llmframe/adapters/output/persistence/__init__.py +5 -0
  27. llmframe/adapters/output/persistence/protocols.py +21 -0
  28. llmframe/application/__init__.py +1 -0
  29. llmframe/application/ports/__init__.py +1 -0
  30. llmframe/domain/__init__.py +1 -0
  31. llmframe/json_types.py +9 -0
  32. llmframe-0.1.0.dist-info/METADATA +108 -0
  33. llmframe-0.1.0.dist-info/RECORD +35 -0
  34. llmframe-0.1.0.dist-info/WHEEL +4 -0
  35. llmframe-0.1.0.dist-info/licenses/LICENSE +21 -0
llmframe/__init__.py ADDED
@@ -0,0 +1,5 @@
1
"""llmframe package."""

# Re-export the LLM adapters subpackage at the package root so callers can
# simply do `from llmframe import llm`.
from .adapters.output import llm

__all__ = ["llm"]
@@ -0,0 +1 @@
1
+ """Adapters layer for external integrations."""
@@ -0,0 +1 @@
1
+ """Driving adapters that invoke application use cases."""
@@ -0,0 +1,5 @@
1
"""Driven adapters that implement output ports."""

# Import for re-export: makes the LLM adapters reachable as `adapters.output.llm`.
from . import llm

__all__ = ["llm"]
@@ -0,0 +1,55 @@
1
"""Public exports for shared LLM adapters."""

# High-level shared adapter plus its result DTOs and exception hierarchy.
from .llm_adapter import (
    LlmAdapter,
    LlmTextCompletionResult,
    StructuredLlmError,
    StructuredLlmInvalidJsonError,
    StructuredLlmJsonCompletionResult,
    StructuredLlmResponseError,
)

# OpenAI transport client, protocol definitions, and response-parsing helpers.
from .openai_adapter import (
    ChatCompletionJsonProtocol,
    ChatCompletionStructuredProtocol,
    ChatCompletionTextProtocol,
    OpenAIClient,
    OpenAIClientProtocol,
    OpenAIClientSettings,
    OpenAILlmProtocol,
    OpenAIResponseError,
    OpenAIResponseUsage,
    ResponseJsonProtocol,
    ResponseStructuredProtocol,
    ResponseTextProtocol,
    build_client,
    extract_message_content,
    extract_usage,
)

# Token-usage accounting helpers.
from .usage_tracker import LlmUsageSummary, LlmUsageTrackerConfig, OpenAILlmUsageTracker

# Keep this list in sync with the imports above (sorted: classes first, then
# functions, matching the existing convention).
__all__ = [
    "ChatCompletionJsonProtocol",
    "ChatCompletionStructuredProtocol",
    "ChatCompletionTextProtocol",
    "LlmAdapter",
    "LlmTextCompletionResult",
    "LlmUsageSummary",
    "LlmUsageTrackerConfig",
    "OpenAIClient",
    "OpenAIClientProtocol",
    "OpenAIClientSettings",
    "OpenAILlmProtocol",
    "OpenAILlmUsageTracker",
    "OpenAIResponseError",
    "OpenAIResponseUsage",
    "ResponseJsonProtocol",
    "ResponseStructuredProtocol",
    "ResponseTextProtocol",
    "StructuredLlmError",
    "StructuredLlmInvalidJsonError",
    "StructuredLlmJsonCompletionResult",
    "StructuredLlmResponseError",
    "build_client",
    "extract_message_content",
    "extract_usage",
]
@@ -0,0 +1,18 @@
1
"""Public exports for the shared LLM adapter."""

from .adapter import LlmAdapter
from .dto import LlmTextCompletionResult, StructuredLlmJsonCompletionResult
from .exceptions import (
    StructuredLlmError,
    StructuredLlmInvalidJsonError,
    StructuredLlmResponseError,
)

# Public API of the subpackage; keep in sync with the imports above.
__all__ = [
    "LlmAdapter",
    "LlmTextCompletionResult",
    "StructuredLlmError",
    "StructuredLlmInvalidJsonError",
    "StructuredLlmJsonCompletionResult",
    "StructuredLlmResponseError",
]
@@ -0,0 +1,301 @@
1
+ """Shared LLM adapter for structured extraction and text generation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+ from typing import TYPE_CHECKING, cast
7
+
8
+ from llmframe.adapters.output.llm.openai_adapter import extract_usage
9
+
10
+ from .dto import LlmTextCompletionResult, StructuredLlmJsonCompletionResult
11
+ from .exceptions import StructuredLlmError
12
+ from .logging_utils import build_json_payload_log_extra, build_text_payload_log_extra
13
+ from .response_parser import extract_structured_content, parse_json_object
14
+
15
+ if TYPE_CHECKING:
16
+ from pydantic import BaseModel
17
+
18
+ from llmframe.adapters.output.llm.openai_adapter import OpenAILlmProtocol, ReasoningEffort
19
+ from llmframe.adapters.output.persistence import JsonWriterProtocol
20
+ from llmframe.json_types import JsonValue
21
+
22
+ from .protocols import StructuredOutputSchema
23
+
24
# Module-level logger; handler/level configuration is left to the host application.
LOGGER = logging.getLogger(__name__)

# Labels used both as debug-payload file labels and as the `debug_label`
# logging field emitted by LlmAdapter.
REQUEST_DEBUG_LABEL = "request_payload"
RESPONSE_TEXT_DEBUG_LABEL = "response_text"
PARSED_RESPONSE_DEBUG_LABEL = "parsed_response_payload"
29
+
30
+
31
class LlmAdapter:
    """Generate structured JSON payloads and plain-text responses.

    Wraps an OpenAI Responses-API transport (``OpenAILlmProtocol``) and adds
    metadata-only logging plus optional debug-payload persistence. Raw prompt
    and response content is never logged — only lengths, kinds, and keys.
    """

    def __init__(
        self,
        *,
        client: OpenAILlmProtocol,
        model: str,
        debug_json_writer: JsonWriterProtocol | None = None,
        debug_json_enabled: bool = False,
    ) -> None:
        """Store transport client and runtime configuration.

        Args:
            client: Transport implementing the Responses-API calls.
            model: Model identifier forwarded on every request.
            debug_json_writer: Optional sink for labeled debug payloads.
            debug_json_enabled: Master switch; when False nothing is written
                even if a writer is supplied.
        """
        self._client = client
        self._model = model
        # Logging metadata only; all requests below go through the Responses API.
        self._api_surface = "responses"
        self._debug_json_writer = debug_json_writer
        self._debug_json_enabled = debug_json_enabled

    def generate_text(
        self,
        *,
        developer_prompt: str,
        user_prompt: str,
        temperature: float | None = None,
        reasoning_effort: str | None = None,
    ) -> LlmTextCompletionResult:
        """Return plain-text content together with token-usage metadata.

        Raises:
            StructuredLlmResponseError: if the response has no usable content
                (via ``extract_structured_content``).
        """
        inputs = self._build_inputs(developer_prompt=developer_prompt, user_prompt=user_prompt)
        # The logged payload is rebuilt locally; keep _build_text_request_payload
        # in sync with the create_response call below.
        self._log_json_stage(
            label=REQUEST_DEBUG_LABEL,
            payload=self._build_text_request_payload(
                inputs=inputs,
                temperature=temperature,
                reasoning_effort=reasoning_effort,
            ),
            message="LLM request payload",
        )
        response = self._client.create_response(
            model=self._model,
            input_items=inputs,
            temperature=temperature,
            # Typing-only narrowing of the plain str; no runtime validation here.
            reasoning_effort=cast("ReasoningEffort | None", reasoning_effort),
        )
        content = extract_structured_content(response)
        usage = extract_usage(response)
        self._log_text_stage(
            label=RESPONSE_TEXT_DEBUG_LABEL,
            content=content,
            message="LLM response content",
        )
        return LlmTextCompletionResult(content=content, usage=usage)

    def extract_json(
        self,
        *,
        developer_prompt: str,
        user_prompt: str,
        response_schema: StructuredOutputSchema | None = None,
    ) -> StructuredLlmJsonCompletionResult:
        """Return parsed JSON payload together with token usage metadata.

        Raises:
            StructuredLlmError: if ``response_schema`` is None.
            StructuredLlmResponseError: if the response has no usable content.
            StructuredLlmInvalidJsonError: if the content is not a JSON object.
        """
        inputs = self._build_inputs(developer_prompt=developer_prompt, user_prompt=user_prompt)
        schema_type = self._require_response_schema(response_schema)
        schema_name = self._schema_name(schema_type)
        schema = self._build_response_schema(schema_type)
        self._log_json_stage(
            label=REQUEST_DEBUG_LABEL,
            payload=self._build_structured_request_payload(
                inputs=inputs,
                schema_name=schema_name,
                schema=schema,
            ),
            message="LLM request payload",
        )

        # Temperature 0 and no reasoning effort are pinned for extraction —
        # presumably to keep structured output deterministic.
        response = self._client.create_structured_response(
            model=self._model,
            input_items=inputs,
            json_schema_name=schema_name,
            schema=schema,
            temperature=0,
            reasoning_effort="none",
        )
        content = extract_structured_content(response)
        usage = extract_usage(response)
        self._log_text_stage(
            label=RESPONSE_TEXT_DEBUG_LABEL,
            content=content,
            message="LLM response content",
        )

        payload = parse_json_object(content)
        self._log_json_stage(
            label=PARSED_RESPONSE_DEBUG_LABEL,
            payload=payload,
            message="LLM parsed JSON payload",
            extra={"payload_keys": list(payload.keys())},
        )
        return StructuredLlmJsonCompletionResult(payload=payload, usage=usage)

    def _build_inputs(self, *, developer_prompt: str, user_prompt: str) -> list[dict[str, str]]:
        """Build structured LLM inputs for the completion request."""
        return [
            {"role": "developer", "content": developer_prompt},
            {"role": "user", "content": user_prompt},
        ]

    def _build_text_request_payload(
        self,
        *,
        inputs: list[dict[str, str]],
        temperature: float | None,
        reasoning_effort: str | None,
    ) -> dict[str, object]:
        """Build metadata-safe text request payload details for logging/debug output."""
        payload: dict[str, object] = {
            "model": self._model,
            "input": inputs,
            "text": {"format": {"type": "text"}},
        }
        # Optional knobs are only logged when they are actually sent.
        if temperature is not None:
            payload["temperature"] = temperature
        if reasoning_effort is not None:
            payload["reasoning"] = {"effort": reasoning_effort}
        return payload

    def _build_structured_request_payload(
        self,
        *,
        inputs: list[dict[str, str]],
        schema_name: str,
        schema: dict[str, object],
    ) -> dict[str, object]:
        """Build metadata-safe structured request payload details for logging/debug output."""
        # Mirrors the fixed arguments passed to create_structured_response in
        # extract_json; keep the two in sync.
        return {
            "model": self._model,
            "input": inputs,
            "reasoning": {"effort": "none"},
            "temperature": 0,
            "text": {
                "format": {
                    "type": "json_schema",
                    "name": schema_name,
                    "strict": True,
                    "schema": schema,
                }
            },
        }

    def _require_response_schema(self, response_schema: StructuredOutputSchema | None) -> StructuredOutputSchema:
        """Return the schema required for structured-output requests."""
        if response_schema is None:
            msg = "Structured output requests require a response schema"
            raise StructuredLlmError(msg, suggestion="Pass a Pydantic response schema to the LLM adapter")
        return response_schema

    def _schema_name(self, schema_type: type[BaseModel]) -> str:
        """Return a stable schema name for Structured Outputs requests."""
        return schema_type.__name__

    def _build_response_schema(self, schema_type: type[BaseModel]) -> dict[str, object]:
        """Return a JSON schema suitable for Structured Outputs requests."""
        raw_schema = cast("dict[str, object]", schema_type.model_json_schema())
        return cast("dict[str, object]", self._normalize_schema_node(raw_schema))

    def _normalize_schema_node(self, node: object) -> object:
        """Normalize one JSON Schema node for OpenAI Structured Outputs.

        Recursively: drops properties marked ``internal: true``, reduces
        ``$ref`` nodes to the reference alone, forces
        ``additionalProperties: false`` on objects, and prunes ``required``
        entries whose property was filtered out.
        """
        if isinstance(node, list):
            return [self._normalize_schema_node(item) for item in node]

        # Scalars (str/int/bool/None) pass through untouched.
        if not isinstance(node, dict):
            return node

        normalized: dict[str, object] = {}
        for key, value in node.items():
            if key == "properties" and isinstance(value, dict):
                filtered_properties: dict[str, object] = {}
                for field_name, field_schema in value.items():
                    # Fields flagged `internal: true` in their schema never
                    # reach the model.
                    if isinstance(field_schema, dict) and field_schema.get("internal") is True:
                        continue
                    filtered_properties[field_name] = self._normalize_schema_node(field_schema)
                normalized[key] = filtered_properties
                continue

            normalized[key] = self._normalize_schema_node(value)

        # Keep `$ref` alone: sibling keywords next to a reference are dropped.
        if "$ref" in normalized:
            return {"$ref": normalized["$ref"]}

        properties = normalized.get("properties")
        if isinstance(properties, dict):
            # Structured Outputs strict mode rejects open objects.
            normalized["additionalProperties"] = False
            required_fields = normalized.get("required")
            if isinstance(required_fields, list):
                # NOTE(review): this only removes stale names; strict mode is
                # documented to also expect every remaining property listed in
                # `required` — confirm against the OpenAI Structured Outputs docs.
                normalized["required"] = [field_name for field_name in required_fields if field_name in properties]

        return normalized

    def _log_json_stage(
        self,
        *,
        label: str,
        payload: object,
        message: str,
        extra: dict[str, object] | None = None,
    ) -> None:
        """Write and log JSON-stage metadata without logging raw payload content."""
        self._write_debug_payload(label=label, payload=payload)
        payload_extra = build_json_payload_log_extra(payload=payload)
        if extra is not None:
            # Caller-supplied fields win over the generated metadata.
            payload_extra.update(extra)
        self._log_payload_metadata(message=message, extra={"debug_label": label, **payload_extra})

    def _log_text_stage(
        self,
        *,
        label: str,
        content: str,
        message: str,
        extra: dict[str, object] | None = None,
    ) -> None:
        """Write and log text-stage metadata without logging raw content."""
        # Wrap the text so the debug writer always receives a JSON object.
        self._write_debug_payload(label=label, payload={"content": content})
        payload_extra = build_text_payload_log_extra(content=content)
        if extra is not None:
            payload_extra.update(extra)
        self._log_payload_metadata(message=message, extra={"debug_label": label, **payload_extra})

    def _log_payload_metadata(
        self,
        *,
        message: str,
        extra: dict[str, object],
    ) -> None:
        """Log payload metadata without logging raw payload content."""
        # Common fields attached to every adapter log record.
        log_extra: dict[str, object] = {
            "component": self.__class__.__name__,
            "model": self._model,
            "api_surface": self._api_surface,
        }
        log_extra.update(extra)

        LOGGER.debug(message, extra=log_extra)

    def _write_debug_payload(self, *, label: str, payload: object) -> None:
        """Write a labeled debug payload when debug JSON output is enabled."""
        if not self._debug_json_enabled or self._debug_json_writer is None:
            return

        # Best effort: serialization (TypeError/ValueError) and I/O (OSError)
        # failures are logged as warnings, never raised to the caller.
        try:
            written_path = self._debug_json_writer.write_json(label=label, payload=cast("JsonValue", payload))
        except (OSError, TypeError, ValueError) as err:
            LOGGER.warning(
                "Failed to write LLM debug payload",
                exc_info=err,
                extra={
                    "component": self.__class__.__name__,
                    "model": self._model,
                    "debug_label": label,
                },
            )
            return

        LOGGER.debug(
            "LLM debug payload written",
            extra={
                "component": self.__class__.__name__,
                "model": self._model,
                "debug_label": label,
                "file_path": str(written_path),
            },
        )
@@ -0,0 +1,26 @@
1
+ """DTOs returned by the shared LLM adapter."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from typing import TYPE_CHECKING
7
+
8
+ if TYPE_CHECKING:
9
+ from llmframe.adapters.output.llm.openai_adapter import OpenAIResponseUsage
10
+ from llmframe.json_types import JsonValue
11
+
12
+
13
@dataclass(frozen=True)
class StructuredLlmJsonCompletionResult:
    """Structured JSON result plus token-usage metadata."""

    # Parsed top-level JSON object returned by the model.
    payload: dict[str, JsonValue]
    # Token-usage metadata extracted from the response; may be None.
    usage: OpenAIResponseUsage | None
19
+
20
+
21
@dataclass(frozen=True)
class LlmTextCompletionResult:
    """Plain-text result plus token-usage metadata."""

    # Raw text content returned by the model.
    content: str
    # Token-usage metadata extracted from the response; may be None.
    usage: OpenAIResponseUsage | None
@@ -0,0 +1,25 @@
1
+ """Exception types for the shared LLM adapter."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+
7
+
8
class StructuredLlmError(Exception):
    """Base exception for shared LLM adapter failures.

    Carries a user-facing ``message`` plus an optional remediation
    ``suggestion``. Implemented as a plain exception class rather than a
    ``@dataclass``: dataclass ``eq=True`` sets ``__hash__`` to ``None``
    (making instances unhashable), and a dataclass ``__init__`` leaves
    ``Exception.args`` empty, which breaks pickling. Calling
    ``super().__init__(message)`` fixes both while keeping the same
    constructor signature.
    """

    def __init__(self, message: str, suggestion: str | None = None) -> None:
        """Store the message (also exposed via ``args``) and optional suggestion."""
        super().__init__(message)
        self.message = message
        self.suggestion = suggestion

    def __str__(self) -> str:
        """Return the user-facing error message."""
        return self.message


class StructuredLlmResponseError(StructuredLlmError):
    """Raised when an LLM response is missing required content."""


class StructuredLlmInvalidJsonError(StructuredLlmError):
    """Raised when the LLM response cannot be parsed as a JSON object."""
@@ -0,0 +1,27 @@
1
+ """Logging helpers for LLM adapter payload metadata."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+
7
+
8
def build_json_payload_log_extra(*, payload: object) -> dict[str, object]:
    """Return metadata-only logging fields for a JSON-serializable payload.

    Only the serialized length, the payload's type name, and (for dicts) the
    sorted key names are exposed — never the payload content itself.
    """
    encoded = json.dumps(payload, ensure_ascii=False, default=str)
    fields: dict[str, object] = {
        "payload_length": len(encoded),
        "payload_kind": type(payload).__name__,
        "payload_preview_omitted": True,
    }
    if isinstance(payload, dict):
        fields["payload_keys"] = sorted(str(name) for name in payload)
    return fields
19
+
20
+
21
def build_text_payload_log_extra(*, content: str) -> dict[str, object]:
    """Return metadata-only logging fields for a text payload.

    Exposes only the content length; the text itself is never logged.
    """
    fields: dict[str, object] = {}
    fields["payload_length"] = len(content)
    fields["payload_kind"] = "text"
    fields["payload_preview_omitted"] = True
    return fields
@@ -0,0 +1,14 @@
1
+ """Narrow type alias used by the shared LLM adapter."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TypeAlias
6
+
7
+ from pydantic import BaseModel
8
+
9
# Any Pydantic model class can serve as a structured-output schema; the
# adapter derives the request schema from it via `model_json_schema()`.
StructuredOutputSchema: TypeAlias = type[BaseModel]


__all__ = [
    "StructuredOutputSchema",
]
@@ -0,0 +1,38 @@
1
+ """Response parsing helpers for structured LLM outputs."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import TYPE_CHECKING, cast
7
+
8
+ from llmframe.adapters.output.llm.openai_adapter import (
9
+ OpenAIResponseError,
10
+ extract_message_content,
11
+ )
12
+
13
+ from .exceptions import StructuredLlmInvalidJsonError, StructuredLlmResponseError
14
+
15
+ if TYPE_CHECKING:
16
+ from llmframe.json_types import JsonValue
17
+
18
+
19
def extract_structured_content(response: object) -> str:
    """Extract textual content from an OpenAI response object.

    Translates transport-level ``OpenAIResponseError`` failures into the
    adapter's ``StructuredLlmResponseError``, preserving the original error
    text as the suggestion.
    """
    try:
        content = extract_message_content(response)
    except OpenAIResponseError as err:
        message = "LLM response is missing content or has an invalid shape"
        raise StructuredLlmResponseError(message, suggestion=str(err)) from err
    return content
26
+
27
+
28
def parse_json_object(content: str) -> dict[str, JsonValue]:
    """Parse model content and require a top-level JSON object result.

    Raises ``StructuredLlmInvalidJsonError`` when the content is not valid
    JSON or when the decoded value is not a JSON object.
    """
    try:
        decoded = json.loads(content)
    except json.JSONDecodeError as err:
        msg = "LLM returned invalid JSON payload"
        raise StructuredLlmInvalidJsonError(msg, suggestion="Inspect the model output and prompts") from err
    if isinstance(decoded, dict):
        return cast("dict[str, JsonValue]", decoded)
    msg = "LLM payload must be a JSON object"
    raise StructuredLlmInvalidJsonError(msg, suggestion="Ensure the prompt requests a top-level JSON object")
@@ -0,0 +1,47 @@
1
+ """Public exports for the shared OpenAI client and helpers."""
2
+
3
# Transport-client factory.
from .client import build_client

# Settings and response DTOs / errors.
from .dto import (
    OpenAIClientSettings,
    OpenAIResponseError,
    OpenAIResponseUsage,
)

# Response-payload parsing helpers.
from .parsing import (
    extract_message_content,
    extract_usage,
)

# Transport protocols and the concrete client.
from .transport import (
    ChatCompletionJsonProtocol,
    ChatCompletionStructuredProtocol,
    ChatCompletionTextProtocol,
    OpenAIClient,
    OpenAIClientProtocol,
    OpenAILlmProtocol,
    OpenAIRequestConfigError,
    ReasoningEffort,
    ResponseJsonProtocol,
    ResponseStructuredProtocol,
    ResponseTextProtocol,
    build_structured_schema_definition,
)

# Keep in sync with the imports above.
__all__ = [
    "ChatCompletionJsonProtocol",
    "ChatCompletionStructuredProtocol",
    "ChatCompletionTextProtocol",
    "OpenAIClient",
    "OpenAIClientProtocol",
    "OpenAIClientSettings",
    "OpenAILlmProtocol",
    "OpenAIRequestConfigError",
    "OpenAIResponseError",
    "OpenAIResponseUsage",
    "ReasoningEffort",
    "ResponseJsonProtocol",
    "ResponseStructuredProtocol",
    "ResponseTextProtocol",
    "build_client",
    "build_structured_schema_definition",
    "extract_message_content",
    "extract_usage",
]
@@ -0,0 +1,41 @@
1
+ """Helpers for constructing the shared OpenAI transport client."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING
6
+
7
+ import httpx
8
+ from openai import OpenAI
9
+
10
+ from .transport import OpenAIClient
11
+
12
+ if TYPE_CHECKING:
13
+ from llmframe.adapters.output.persistence import JsonWriterProtocol
14
+
15
+ from .dto import OpenAIClientSettings
16
+
17
+
18
def build_client(
    settings: OpenAIClientSettings,
    *,
    debug_json_writer: JsonWriterProtocol | None = None,
    debug_json_enabled: bool = False,
) -> OpenAIClient:
    """Build an ``OpenAIClient`` from explicit settings.

    Constructs an ``httpx.Client`` honoring the settings' TLS-verification and
    timeout options, wraps it in the OpenAI SDK client, and hands both to the
    transport wrapper together with the retry policy and debug configuration.
    """
    transport = httpx.Client(
        verify=settings.verify_ssl,
        timeout=httpx.Timeout(settings.timeout_seconds),
    )
    sdk = OpenAI(
        base_url=settings.base_url,
        api_key=settings.api_key,
        http_client=transport,
    )
    return OpenAIClient(
        sdk_client=sdk,
        max_retries=settings.max_retries,
        backoff_factor=settings.backoff_factor,
        debug_json_writer=debug_json_writer,
        debug_json_enabled=debug_json_enabled,
    )
@@ -0,0 +1,32 @@
1
+ """Public DTOs and exceptions for shared OpenAI response handling."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+
7
+ from .transport import DEFAULT_BACKOFF_FACTOR, DEFAULT_MAX_RETRIES
8
+
9
+
10
# Subclasses ValueError so generic value-validation handlers also catch it.
class OpenAIResponseError(ValueError):
    """Raised when the OpenAI response shape is invalid."""
12
+
13
+
14
@dataclass(frozen=True)
class OpenAIClientSettings:
    """Configuration for creating an OpenAI transport client."""

    # Endpoint and credentials for the OpenAI-compatible API.
    base_url: str
    api_key: str
    # Retry policy applied by the transport wrapper (defaults shared with it).
    max_retries: int = DEFAULT_MAX_RETRIES
    backoff_factor: float = DEFAULT_BACKOFF_FACTOR
    # HTTP behaviour: TLS certificate verification and request timeout.
    verify_ssl: bool = True
    timeout_seconds: float = 30.0
24
+
25
+
26
@dataclass(frozen=True)
class OpenAIResponseUsage:
    """Normalized token usage metadata extracted from an OpenAI response."""

    # Token counts as reported by the API; each may be None when absent.
    input_tokens: int | None
    output_tokens: int | None
    total_tokens: int | None
@@ -0,0 +1,6 @@
1
"""Public parsing helpers for OpenAI response payloads."""

# Re-export the two parsing entry points as the subpackage's public API.
from .message_content import extract_message_content
from .usage import extract_usage

__all__ = ["extract_message_content", "extract_usage"]