mirascope 2.0.0a6__py3-none-any.whl → 2.0.1__py3-none-any.whl
- mirascope/api/_generated/__init__.py +186 -5
- mirascope/api/_generated/annotations/client.py +38 -6
- mirascope/api/_generated/annotations/raw_client.py +366 -47
- mirascope/api/_generated/annotations/types/annotations_create_response.py +19 -6
- mirascope/api/_generated/annotations/types/annotations_get_response.py +19 -6
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +22 -7
- mirascope/api/_generated/annotations/types/annotations_update_response.py +19 -6
- mirascope/api/_generated/api_keys/__init__.py +12 -2
- mirascope/api/_generated/api_keys/client.py +107 -6
- mirascope/api/_generated/api_keys/raw_client.py +486 -38
- mirascope/api/_generated/api_keys/types/__init__.py +7 -1
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/client.py +36 -0
- mirascope/api/_generated/docs/raw_client.py +71 -9
- mirascope/api/_generated/environment.py +3 -3
- mirascope/api/_generated/environments/__init__.py +6 -0
- mirascope/api/_generated/environments/client.py +158 -9
- mirascope/api/_generated/environments/raw_client.py +620 -52
- mirascope/api/_generated/environments/types/__init__.py +10 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/{organizations/types/organizations_credits_response.py → environments/types/environments_get_analytics_response_top_models_item.py} +6 -3
- mirascope/api/_generated/errors/__init__.py +6 -0
- mirascope/api/_generated/errors/bad_request_error.py +5 -2
- mirascope/api/_generated/errors/conflict_error.py +5 -2
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/functions/__init__.py +10 -0
- mirascope/api/_generated/functions/client.py +222 -8
- mirascope/api/_generated/functions/raw_client.py +975 -134
- mirascope/api/_generated/functions/types/__init__.py +28 -4
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/health/raw_client.py +74 -10
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +26 -2
- mirascope/api/_generated/organizations/client.py +442 -20
- mirascope/api/_generated/organizations/raw_client.py +1763 -164
- mirascope/api/_generated/organizations/types/__init__.py +48 -2
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +25 -0
- mirascope/api/_generated/project_memberships/client.py +437 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/raw_client.py +415 -58
- mirascope/api/_generated/reference.md +2767 -397
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +20 -0
- mirascope/api/_generated/traces/client.py +543 -0
- mirascope/api/_generated/traces/raw_client.py +1366 -96
- mirascope/api/_generated/traces/types/__init__.py +28 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +6 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -2
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +10 -1
- mirascope/api/_generated/types/__init__.py +32 -2
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +3 -3
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/settings.py +19 -1
- mirascope/llm/__init__.py +53 -10
- mirascope/llm/calls/__init__.py +2 -1
- mirascope/llm/calls/calls.py +3 -1
- mirascope/llm/calls/decorator.py +21 -7
- mirascope/llm/content/tool_output.py +22 -5
- mirascope/llm/exceptions.py +284 -71
- mirascope/llm/formatting/__init__.py +17 -0
- mirascope/llm/formatting/format.py +112 -35
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +80 -7
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +20 -8
- mirascope/llm/messages/__init__.py +3 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/models/__init__.py +5 -0
- mirascope/llm/models/models.py +137 -69
- mirascope/llm/{providers/base → models}/params.py +7 -57
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/_utils.py +0 -32
- mirascope/llm/prompts/decorator.py +16 -5
- mirascope/llm/prompts/prompts.py +131 -68
- mirascope/llm/providers/__init__.py +1 -4
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +18 -9
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +62 -13
- mirascope/llm/providers/anthropic/_utils/decode.py +18 -9
- mirascope/llm/providers/anthropic/_utils/encode.py +26 -7
- mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
- mirascope/llm/providers/anthropic/beta_provider.py +64 -18
- mirascope/llm/providers/anthropic/provider.py +91 -33
- mirascope/llm/providers/base/__init__.py +0 -4
- mirascope/llm/providers/base/_utils.py +55 -6
- mirascope/llm/providers/base/base_provider.py +116 -37
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +20 -7
- mirascope/llm/providers/google/_utils/encode.py +26 -7
- mirascope/llm/providers/google/_utils/errors.py +3 -2
- mirascope/llm/providers/google/provider.py +64 -18
- mirascope/llm/providers/mirascope/_utils.py +13 -17
- mirascope/llm/providers/mirascope/provider.py +49 -18
- mirascope/llm/providers/mlx/_utils.py +7 -2
- mirascope/llm/providers/mlx/encoding/base.py +5 -2
- mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
- mirascope/llm/providers/mlx/mlx.py +23 -6
- mirascope/llm/providers/mlx/provider.py +42 -13
- mirascope/llm/providers/openai/_utils/errors.py +2 -2
- mirascope/llm/providers/openai/completions/_utils/encode.py +20 -16
- mirascope/llm/providers/openai/completions/base_provider.py +40 -11
- mirascope/llm/providers/openai/provider.py +40 -10
- mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +19 -6
- mirascope/llm/providers/openai/responses/_utils/encode.py +22 -10
- mirascope/llm/providers/openai/responses/provider.py +56 -18
- mirascope/llm/providers/provider_registry.py +93 -19
- mirascope/llm/responses/__init__.py +6 -1
- mirascope/llm/responses/_utils.py +102 -12
- mirascope/llm/responses/base_response.py +5 -2
- mirascope/llm/responses/base_stream_response.py +115 -25
- mirascope/llm/responses/response.py +2 -1
- mirascope/llm/responses/root_response.py +89 -17
- mirascope/llm/responses/stream_response.py +6 -9
- mirascope/llm/tools/decorator.py +9 -4
- mirascope/llm/tools/tool_schema.py +12 -6
- mirascope/llm/tools/toolkit.py +35 -27
- mirascope/llm/tools/tools.py +45 -20
- mirascope/ops/__init__.py +4 -0
- mirascope/ops/_internal/configuration.py +82 -31
- mirascope/ops/_internal/exporters/exporters.py +64 -11
- mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
- mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1242
- mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
- mirascope/ops/_internal/protocols.py +83 -1
- mirascope/ops/_internal/traced_calls.py +4 -0
- mirascope/ops/_internal/traced_functions.py +118 -8
- mirascope/ops/_internal/tracing.py +78 -1
- mirascope/ops/_internal/utils.py +52 -4
- {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/METADATA +12 -11
- mirascope-2.0.1.dist-info/RECORD +423 -0
- {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
- mirascope-2.0.0a6.dist-info/RECORD +0 -316
- {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0
mirascope/llm/providers/openai/responses/_utils/encode.py

@@ -1,7 +1,9 @@
 """OpenAI Responses message encoding and request preparation."""

+from __future__ import annotations
+
 from collections.abc import Sequence
-from typing import TypedDict, cast
+from typing import TYPE_CHECKING, TypedDict, cast

 from openai import Omit
 from openai.types.responses import (
@@ -33,11 +35,12 @@ from .....exceptions import FeatureNotSupportedError
 from .....formatting import (
     Format,
     FormattableT,
+    OutputParser,
     resolve_format,
 )
 from .....messages import AssistantMessage, Message, UserMessage
 from .....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
-from ....base import
+from ....base import _utils as _base_utils
 from ...model_id import OpenAIModelId, model_name
 from ...model_info import (
     MODELS_WITHOUT_JSON_OBJECT_SUPPORT,
@@ -45,6 +48,9 @@ from ...model_info import (
     NON_REASONING_MODELS,
 )

+if TYPE_CHECKING:
+    from .....models import Params, ThinkingLevel
+
 # Thinking level to a float multiplier % of max tokens
 THINKING_LEVEL_TO_EFFORT: dict[ThinkingLevel, ReasoningEffort] = {
     "default": "medium",
@@ -113,7 +119,7 @@ def _encode_user_message(
             result.append(
                 FunctionCallOutput(
                     call_id=part.id,
-                    output=str(part.
+                    output=str(part.result),
                     type="function_call_output",
                 )
             )
@@ -211,13 +217,16 @@ def _convert_tool_to_function_tool_param(tool: AnyToolSchema) -> FunctionToolParam
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
     _base_utils.ensure_additional_properties_false(schema_dict)
+    strict = True if tool.strict is None else tool.strict
+    if strict:
+        _base_utils.ensure_all_properties_required(schema_dict)

     return FunctionToolParam(
         type="function",
         name=tool.name,
         description=tool.description,
         parameters=schema_dict,
-        strict=
+        strict=strict,
     )

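Note: the hunk above makes tools default to `strict=True` when `tool.strict` is unset, and strict schemas get every property marked required (OpenAI strict function calling also requires `additionalProperties: false`). The helper bodies are not shown in this diff; a minimal sketch of what `ensure_all_properties_required` plausibly does, under those assumptions:

```python
# Hypothetical sketch of the strict-mode schema normalization implied above;
# the real helper lives in mirascope's providers/base/_utils and is not shown here.
def ensure_all_properties_required(schema: dict) -> None:
    """Recursively mark every declared property as required (assumed behavior)."""
    props = schema.get("properties")
    if not isinstance(props, dict):
        return
    schema["required"] = list(props)
    for prop in props.values():
        if isinstance(prop, dict) and prop.get("type") == "object":
            ensure_all_properties_required(prop)


schema = {
    "type": "object",
    "properties": {"title": {"type": "string"}, "year": {"type": "integer"}},
    "additionalProperties": False,
}
ensure_all_properties_required(schema)
assert schema["required"] == ["title", "year"]
```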
@@ -249,20 +258,20 @@ def _create_strict_response_format(

 def _compute_reasoning(
     level: ThinkingLevel,
-
+    include_thoughts: bool,
 ) -> Reasoning:
     """Compute the OpenAI `Reasoning` config based on ThinkingConfig.

     Args:
         level: The thinking level
-
+        include_thoughts: Whether to include summary (True/False for auto)

     Returns:
         OpenAI Reasoning configuration
     """
     reasoning: Reasoning = {"effort": THINKING_LEVEL_TO_EFFORT.get(level) or "medium"}

-    if
+    if include_thoughts:
         reasoning["summary"] = "auto"

     return reasoning
@@ -273,7 +282,10 @@ def encode_request(
     model_id: OpenAIModelId,
     messages: Sequence[Message],
     tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT]
+    format: type[FormattableT]
+    | Format[FormattableT]
+    | OutputParser[FormattableT]
+    | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, ResponseCreateKwargs]:
     """Prepares a request for the `OpenAI.responses.create` method."""
@@ -315,8 +327,8 @@
     # Assume model supports reasoning unless explicitly listed as non-reasoning
     # This ensures new reasoning models work immediately without code updates
     level = thinking_config.get("level")
-
-    kwargs["reasoning"] = _compute_reasoning(level,
+    include_thoughts = thinking_config.get("include_thoughts", False)
+    kwargs["reasoning"] = _compute_reasoning(level, include_thoughts)

     # Handle encode_thoughts_as_text from ThinkingConfig
     if thinking_config.get("encode_thoughts_as_text"):
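Note: these encode changes thread `include_thoughts` from the `ThinkingConfig` through to OpenAI's `Reasoning` payload: the thinking level selects a reasoning effort, and `include_thoughts` opts in to reasoning summaries. A standalone restatement of the mapping (the effort table is abbreviated here; the real code uses mirascope's typed `ThinkingLevel` and OpenAI's `Reasoning` TypedDict):

```python
# Abbreviated restatement of _compute_reasoning with plain dicts and strings.
THINKING_LEVEL_TO_EFFORT = {"default": "medium"}  # full table not shown in this hunk


def compute_reasoning(level: str, include_thoughts: bool) -> dict:
    reasoning = {"effort": THINKING_LEVEL_TO_EFFORT.get(level) or "medium"}
    if include_thoughts:
        reasoning["summary"] = "auto"  # request reasoning summaries from OpenAI
    return reasoning


assert compute_reasoning("default", False) == {"effort": "medium"}
assert compute_reasoning("default", True) == {"effort": "medium", "summary": "auto"}
```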
mirascope/llm/providers/openai/responses/provider.py

@@ -1,13 +1,16 @@
 """OpenAI Responses API client implementation."""

+from __future__ import annotations
+
 from collections.abc import Sequence
+from typing import TYPE_CHECKING
 from typing_extensions import Unpack

 from openai import AsyncOpenAI, BadRequestError as OpenAIBadRequestError, OpenAI

 from ....context import Context, DepsT
 from ....exceptions import BadRequestError, NotFoundError
-from ....formatting import Format, FormattableT
+from ....formatting import Format, FormattableT, OutputParser
 from ....messages import Message
 from ....responses import (
     AsyncContextResponse,
@@ -29,11 +32,14 @@ from ....tools import (
     Tool,
     Toolkit,
 )
-from ...base import BaseProvider
+from ...base import BaseProvider
 from .. import _utils as _shared_utils
 from ..model_id import OpenAIModelId, model_name
 from . import _utils

+if TYPE_CHECKING:
+    from ....models import Params
+

 class OpenAIResponsesProvider(BaseProvider[OpenAI]):
     """The client for the OpenAI Responses API."""
@@ -67,7 +73,10 @@ class OpenAIResponsesProvider(BaseProvider[OpenAI]):
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the OpenAI Responses API.
@@ -91,8 +100,9 @@
         )
         openai_response = self.client.responses.create(**kwargs)

+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            openai_response, model_id, self.id
+            openai_response, model_id, self.id, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -116,7 +126,10 @@
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI Responses API.
@@ -140,8 +153,9 @@
         )
         openai_response = await self.async_client.responses.create(**kwargs)

+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            openai_response, model_id, self.id
+            openai_response, model_id, self.id, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -165,7 +179,10 @@
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate a `llm.StreamResponse` by synchronously streaming from the OpenAI Responses API.
@@ -192,8 +209,9 @@
             stream=True,
         )

+        include_thoughts = _utils.get_include_thoughts(params)
         chunk_iterator = _utils.decode_stream(
-            openai_stream,
+            openai_stream, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -214,7 +232,10 @@
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate a `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI Responses API.
@@ -241,8 +262,9 @@
             stream=True,
         )

+        include_thoughts = _utils.get_include_thoughts(params)
         chunk_iterator = _utils.decode_async_stream(
-            openai_stream,
+            openai_stream, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -266,7 +288,10 @@
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
         """Generate a `llm.ContextResponse` by synchronously calling the OpenAI Responses API with context.
@@ -291,8 +316,9 @@
         )
         openai_response = self.client.responses.create(**kwargs)

+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            openai_response, model_id, self.id
+            openai_response, model_id, self.id, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -319,7 +345,10 @@
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate a `llm.AsyncContextResponse` by asynchronously calling the OpenAI Responses API with context.
@@ -344,8 +373,9 @@
         )
         openai_response = await self.async_client.responses.create(**kwargs)

+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            openai_response, model_id, self.id
+            openai_response, model_id, self.id, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -372,7 +402,10 @@
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate a `llm.ContextStreamResponse` by synchronously streaming from the OpenAI Responses API with context.
@@ -401,8 +434,9 @@
             stream=True,
         )

+        include_thoughts = _utils.get_include_thoughts(params)
         chunk_iterator = _utils.decode_stream(
-            openai_stream,
+            openai_stream, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

@@ -426,7 +460,10 @@
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT]
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -458,8 +495,9 @@
             stream=True,
         )

+        include_thoughts = _utils.get_include_thoughts(params)
         chunk_iterator = _utils.decode_async_stream(
-            openai_stream,
+            openai_stream, include_thoughts=include_thoughts
         )
         provider_model_name = model_name(model_id, "responses")

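Note: every sync/async call and stream path in this provider now computes `include_thoughts` once from the call params and threads it into the decode helpers, so thought content only surfaces when requested. `_utils.get_include_thoughts` itself is not shown in this diff; a plausible sketch, assuming it mirrors the `thinking_config.get("include_thoughts", False)` lookup seen in encode.py:

```python
# Hypothetical sketch of get_include_thoughts; the actual helper lives in
# mirascope's openai responses _utils module and may differ.
from typing import Any


def get_include_thoughts(params: dict[str, Any]) -> bool:
    """Read include_thoughts out of the call params' thinking config (assumed)."""
    thinking = params.get("thinking")
    if isinstance(thinking, dict):
        return bool(thinking.get("include_thoughts", False))
    return False


assert get_include_thoughts({}) is False
assert get_include_thoughts({"thinking": {"include_thoughts": True}}) is True
```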
mirascope/llm/providers/provider_registry.py

@@ -1,9 +1,12 @@
 """Provider registry for managing provider instances and scopes."""

+import os
+from collections.abc import Sequence
+from dataclasses import dataclass
 from functools import lru_cache
 from typing import overload

-from ..exceptions import NoRegisteredProviderError
+from ..exceptions import MissingAPIKeyError, NoRegisteredProviderError
 from .anthropic import AnthropicProvider
 from .base import Provider
 from .google import GoogleProvider
@@ -27,18 +30,64 @@ def reset_provider_registry() -> None:
     provider_singleton.cache_clear()


-
-
-
-
-
-
-"
-
-
+@dataclass(frozen=True)
+class ProviderDefault:
+    """Configuration for a provider in the auto-registration fallback chain.
+
+    When auto-registering a provider for a scope, the fallback chain is tried
+    in order. The first provider whose API key is available will be used.
+    """
+
+    provider_id: ProviderId
+    """The provider identifier."""
+
+    api_key_env_var: str | None
+    """Environment variable for the API key, or None if no key is required."""
+
+
+# Fallback chain for auto-registration: try providers in order until one has
+# its API key available. This enables automatic fallback to Mirascope Router
+# when direct provider keys are not set.
+DEFAULT_AUTO_REGISTER_SCOPES: dict[str, Sequence[ProviderDefault]] = {
+    "anthropic/": [
+        ProviderDefault("anthropic", "ANTHROPIC_API_KEY"),
+        ProviderDefault("mirascope", "MIRASCOPE_API_KEY"),
+    ],
+    "google/": [
+        ProviderDefault("google", "GOOGLE_API_KEY"),
+        ProviderDefault("mirascope", "MIRASCOPE_API_KEY"),
+    ],
+    "openai/": [
+        ProviderDefault("openai", "OPENAI_API_KEY"),
+        ProviderDefault("mirascope", "MIRASCOPE_API_KEY"),
+    ],
+    "together/": [
+        ProviderDefault("together", "TOGETHER_API_KEY"),
+        # No Mirascope fallback for together
+    ],
+    "ollama/": [
+        ProviderDefault("ollama", None),  # No API key required
+    ],
+    "mlx-community/": [
+        ProviderDefault("mlx", None),  # No API key required
+    ],
 }


+def _has_api_key(default: ProviderDefault) -> bool:
+    """Check if the API key for a provider default is available.
+
+    Args:
+        default: The provider default configuration to check.
+
+    Returns:
+        True if the API key is available or not required, False otherwise.
+    """
+    if default.api_key_env_var is None:
+        return True  # Provider doesn't require API key
+    return os.environ.get(default.api_key_env_var) is not None
+
+
 @lru_cache(maxsize=256)
 def provider_singleton(
     provider_id: ProviderId, *, api_key: str | None = None, base_url: str | None = None
@@ -175,6 +224,10 @@ def get_provider_for_model(model_id: str) -> Provider:
     If no explicit registration is found, checks for auto-registration defaults
     and automatically registers the provider on first use.

+    When auto-registering, providers are tried in fallback order. For example,
+    if ANTHROPIC_API_KEY is not set but MIRASCOPE_API_KEY is, the Mirascope
+    Router will be used as a fallback for anthropic/ models.
+
     Args:
         model_id: The full model ID (e.g., "anthropic/claude-4-5-sonnet").

@@ -182,7 +235,8 @@
         The provider instance registered for this model.

     Raises:
-
+        NoRegisteredProviderError: If no provider scope matches the model_id.
+        MissingAPIKeyError: If no provider in the fallback chain has its API key set.

     Example:
         ```python
@@ -199,6 +253,11 @@
         # Auto-registration on first use:
         provider = get_provider_for_model("openai/gpt-4")
         # Automatically loads and registers OpenAIProvider() for "openai/"
+
+        # Fallback to Mirascope Router if direct key missing:
+        # (with MIRASCOPE_API_KEY set but not ANTHROPIC_API_KEY)
+        provider = get_provider_for_model("anthropic/claude-4-5-sonnet")
+        # Returns MirascopeProvider registered for "anthropic/" scope
         ```
     """
     # Try explicit registry first (longest match wins)
@@ -209,17 +268,32 @@
         best_scope = max(matching_scopes, key=len)
         return PROVIDER_REGISTRY[best_scope]

-    # Fall back to auto-registration
+    # Fall back to auto-registration with fallback chain
     matching_defaults = [
         scope for scope in DEFAULT_AUTO_REGISTER_SCOPES if model_id.startswith(scope)
     ]
     if matching_defaults:
         best_scope = max(matching_defaults, key=len)
-
-
-        #
-
-
-
-
+        fallback_chain = DEFAULT_AUTO_REGISTER_SCOPES[best_scope]
+
+        # Try each provider in the fallback chain
+        for default in fallback_chain:
+            if _has_api_key(default):
+                provider = provider_singleton(default.provider_id)
+                # Register for just this scope (not all provider's default scopes)
+                PROVIDER_REGISTRY[best_scope] = provider
+                return provider
+
+        # No provider in chain has API key - raise helpful error
+        primary = fallback_chain[0]
+        has_mirascope_fallback = any(
+            d.provider_id == "mirascope" for d in fallback_chain
+        )
+        raise MissingAPIKeyError(
+            provider_id=primary.provider_id,
+            env_var=primary.api_key_env_var or "",
+            has_mirascope_fallback=has_mirascope_fallback,
+        )
+
+    # No matching scope at all
     raise NoRegisteredProviderError(model_id)
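Note: in practice the new chain means a missing direct key no longer hard-fails when a Mirascope Router key is available. A sketch of the runtime behavior (the import path is inferred from the file location and the key value is fake; the public API may re-export `get_provider_for_model` elsewhere):

```python
# Illustrative only: assumes this module path is importable as written.
import os

from mirascope.llm.providers.provider_registry import get_provider_for_model

os.environ.pop("ANTHROPIC_API_KEY", None)       # no direct Anthropic key
os.environ["MIRASCOPE_API_KEY"] = "fake-key"    # router key present

provider = get_provider_for_model("anthropic/claude-4-5-sonnet")
# The "anthropic/" scope auto-registers the Mirascope provider as a fallback
# instead of raising MissingAPIKeyError; with neither key set, it would raise.
```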
mirascope/llm/responses/__init__.py

@@ -1,16 +1,18 @@
 """The Responses module for LLM responses."""

 from . import _utils
+from .base_response import ResponseT
 from .base_stream_response import (
     AsyncChunkIterator,
     ChunkIterator,
     RawMessageChunk,
     RawStreamEventChunk,
     StreamResponseChunk,
+    StreamResponseT,
 )
 from .finish_reason import FinishReason, FinishReasonChunk
 from .response import AsyncContextResponse, AsyncResponse, ContextResponse, Response
-from .root_response import RootResponse
+from .root_response import AnyResponse, RootResponse
 from .stream_response import (
     AsyncContextStreamResponse,
     AsyncStreamResponse,
@@ -30,6 +32,7 @@ from .streams import (
 from .usage import Usage, UsageDeltaChunk

 __all__ = [
+    "AnyResponse",
     "AsyncChunkIterator",
     "AsyncContextResponse",
     "AsyncContextStreamResponse",
@@ -47,10 +50,12 @@ __all__ = [
     "RawMessageChunk",
     "RawStreamEventChunk",
     "Response",
+    "ResponseT",
     "RootResponse",
     "Stream",
     "StreamResponse",
     "StreamResponseChunk",
+    "StreamResponseT",
     "TextStream",
     "ThoughtStream",
     "ToolCallStream",
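Note: the module now re-exports the `ResponseT` and `StreamResponseT` type variables plus the `AnyResponse` alias, which lets downstream code write helpers generic over concrete response classes. A minimal sketch, assuming these names import as shown (`ResponseT` is bound to `BaseResponse[Any, Any]`; see base_response.py below):

```python
# Sketch: a pass-through helper that preserves the precise response subtype.
from mirascope.llm.responses import ResponseT


def tap(response: ResponseT) -> ResponseT:
    """Inspect a response and return it with its exact type intact."""
    print(type(response).__name__)
    return response
```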
mirascope/llm/responses/_utils.py

@@ -1,20 +1,34 @@
-
-
+"""Utilities for response classes."""
+
+import json
+from typing import cast
+
+import jiter
+from pydantic import BaseModel
+
+from ..formatting import (
+    FormattableT,
+    Partial,
+    PrimitiveWrapperModel,
+    create_wrapper_model,
+    is_primitive_type,
+)
+
+
+def _strip_json_preamble(text: str) -> str | None:
+    """Strip preamble text before JSON content.

     Handles cases where models output text before JSON like:
-        "Sure thing! Here's the JSON:\n{...
+        "Sure thing! Here's the JSON:\n{..."

     Or cases where the model wraps the JSON in code blocks like:
-        "```json\n{...
+        "```json\n{..."

     Args:
         text: The raw text that may contain a JSON object

-    Raises:
-        ValueError: If no serialized json object string was found.
-
     Returns:
-
+        Text starting from the opening `{`, or None if no `{` found.
     """
     code_block_start_marker = "```json"
     code_block_start = text.find(code_block_start_marker)
@@ -25,14 +39,39 @@ def extract_serialized_json(text: str) -> str:

     json_start = text.find("{")
     if json_start == -1:
-
+        return None
+
+    return text[json_start:]
+
+
+def extract_serialized_json(text: str) -> str:
+    """Extract the serialized JSON string from text that may contain extra content.
+
+    Handles cases where models output text before JSON like:
+        "Sure thing! Here's the JSON:\n{...}"
+
+    Or cases where the model wraps the JSON in code blocks like:
+        "```json\n{...}\n```"
+
+    Args:
+        text: The raw text that may contain a JSON object
+
+    Raises:
+        json.JSONDecodeError: If no valid JSON object could be extracted.
+
+    Returns:
+        The extracted serialized JSON string
+    """
+    stripped = _strip_json_preamble(text)
+    if stripped is None:
+        raise json.JSONDecodeError("No JSON object found: missing '{'", text, 0)

     # Find the matching closing brace
     brace_count = 0
     in_string = False
     escaped = False

-    for i, char in enumerate(
+    for i, char in enumerate(stripped):
         if escaped:
             escaped = False
             continue
@@ -51,6 +90,57 @@ def extract_serialized_json(text: str) -> str:
         elif char == "}":
             brace_count -= 1
             if brace_count == 0:
-                return
+                return stripped[: i + 1]
+
+    raise json.JSONDecodeError("No JSON object found: missing '}'", text, len(text))
+
+
+def parse_partial_json(
+    json_text: str, formattable: type[FormattableT]
+) -> FormattableT | Partial[FormattableT] | None:
+    """Parse incomplete JSON into a Partial model for structured streaming.
+
+    Uses jiter's partial mode to handle incomplete JSON gracefully.
+    Returns None if JSON cannot be parsed yet.
+
+    Handles cases where models output text before JSON like:
+        "Sure thing! Here's the JSON:\n{..."
+
+    Args:
+        json_text: The incomplete JSON string to parse
+        formattable: The target format type (BaseModel or PrimitiveType)
+
+    Returns:
+        Parsed partial object, or None if unparsable
+
+    Example:
+        >>> from pydantic import BaseModel
+        >>> class Book(BaseModel):
+        ...     title: str
+        ...     author: str
+        >>> parse_partial_json('{"title": "The Name"', Book)
+        PartialBook(title='The Name', author=None)
+    """
+    # Strip preamble text before JSON
+    stripped = _strip_json_preamble(json_text)
+    if stripped is None:
+        return None
+
+    try:
+        parsed = jiter.from_json(stripped.encode(), partial_mode="trailing-strings")
+    except Exception:
+        return None
+
+    target_model = formattable
+    if is_primitive_type(target_model):
+        target_model = cast(BaseModel, create_wrapper_model(target_model))
+
+    try:
+        instance = cast(BaseModel, Partial[target_model]).model_validate(parsed)
+    except Exception:
+        return None
+
+    if is_primitive_type(formattable):
+        return cast(PrimitiveWrapperModel, instance).output

-
+    return cast(Partial[FormattableT], instance)
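Note: `parse_partial_json` is what lets structured streaming render half-finished objects as they arrive. The core trick is jiter's partial mode; a standalone demonstration without mirascope's `Partial[...]` validation layer (the chunk values are illustrative):

```python
# Requires jiter (a mirascope dependency). partial_mode="trailing-strings"
# keeps incomplete trailing string values instead of discarding them.
import jiter

chunks = ['{"title": "The Na', 'me of the Wind", "author": "Patr', 'ick Rothfuss"}']
buffer = ""
for chunk in chunks:
    buffer += chunk
    print(jiter.from_json(buffer.encode(), partial_mode="trailing-strings"))
# {'title': 'The Na'}
# {'title': 'The Name of the Wind', 'author': 'Patr'}
# {'title': 'The Name of the Wind', 'author': 'Patrick Rothfuss'}
```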
mirascope/llm/responses/base_response.py

@@ -1,7 +1,7 @@
 """Shared base of Response and AsyncResponse."""

 from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, TypeVar

 from ..content import Text, Thought, ToolCall
 from ..formatting import Format, FormattableT
@@ -12,7 +12,10 @@ from .root_response import RootResponse
 from .usage import Usage

 if TYPE_CHECKING:
-    from ..
+    from ..models import Params
+    from ..providers import ModelId, ProviderId
+
+ResponseT = TypeVar("ResponseT", bound="BaseResponse[Any, Any]")


 class BaseResponse(RootResponse[ToolkitT, FormattableT]):
|