mirascope 2.0.0a4__py3-none-any.whl → 2.0.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +10 -1
- mirascope/_stubs.py +363 -0
- mirascope/api/__init__.py +8 -0
- mirascope/api/_generated/__init__.py +119 -1
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +474 -0
- mirascope/api/_generated/annotations/raw_client.py +1095 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +7 -0
- mirascope/api/_generated/api_keys/client.py +429 -0
- mirascope/api/_generated/api_keys/raw_client.py +788 -0
- mirascope/api/_generated/api_keys/types/__init__.py +9 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
- mirascope/api/_generated/client.py +12 -0
- mirascope/api/_generated/core/client_wrapper.py +2 -14
- mirascope/api/_generated/core/datetime_utils.py +1 -3
- mirascope/api/_generated/core/file.py +2 -5
- mirascope/api/_generated/core/http_client.py +36 -112
- mirascope/api/_generated/core/jsonable_encoder.py +1 -3
- mirascope/api/_generated/core/pydantic_utilities.py +19 -74
- mirascope/api/_generated/core/query_encoder.py +1 -3
- mirascope/api/_generated/core/serialization.py +4 -10
- mirascope/api/_generated/docs/client.py +2 -6
- mirascope/api/_generated/docs/raw_client.py +4 -20
- mirascope/api/_generated/environments/__init__.py +17 -0
- mirascope/api/_generated/environments/client.py +500 -0
- mirascope/api/_generated/environments/raw_client.py +999 -0
- mirascope/api/_generated/environments/types/__init__.py +15 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
- mirascope/api/_generated/errors/__init__.py +2 -0
- mirascope/api/_generated/errors/bad_request_error.py +1 -5
- mirascope/api/_generated/errors/conflict_error.py +1 -5
- mirascope/api/_generated/errors/forbidden_error.py +1 -5
- mirascope/api/_generated/errors/internal_server_error.py +1 -6
- mirascope/api/_generated/errors/not_found_error.py +1 -5
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +29 -0
- mirascope/api/_generated/functions/client.py +433 -0
- mirascope/api/_generated/functions/raw_client.py +1049 -0
- mirascope/api/_generated/functions/types/__init__.py +29 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/client.py +2 -6
- mirascope/api/_generated/health/raw_client.py +5 -23
- mirascope/api/_generated/health/types/health_check_response.py +1 -3
- mirascope/api/_generated/organizations/__init__.py +2 -0
- mirascope/api/_generated/organizations/client.py +94 -27
- mirascope/api/_generated/organizations/raw_client.py +246 -128
- mirascope/api/_generated/organizations/types/__init__.py +2 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_credits_response.py +19 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_update_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +1 -3
- mirascope/api/_generated/projects/__init__.py +2 -12
- mirascope/api/_generated/projects/client.py +38 -68
- mirascope/api/_generated/projects/raw_client.py +92 -163
- mirascope/api/_generated/projects/types/__init__.py +1 -6
- mirascope/api/_generated/projects/types/projects_create_response.py +4 -9
- mirascope/api/_generated/projects/types/projects_get_response.py +4 -9
- mirascope/api/_generated/projects/types/projects_list_response_item.py +4 -9
- mirascope/api/_generated/projects/types/projects_update_response.py +4 -9
- mirascope/api/_generated/reference.md +1862 -70
- mirascope/api/_generated/traces/__init__.py +22 -0
- mirascope/api/_generated/traces/client.py +398 -0
- mirascope/api/_generated/traces/raw_client.py +902 -18
- mirascope/api/_generated/traces/types/__init__.py +32 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +4 -11
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +4 -8
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +6 -18
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_response.py +2 -5
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +3 -9
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +54 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +90 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +41 -0
- mirascope/api/_generated/types/__init__.py +18 -0
- mirascope/api/_generated/types/already_exists_error.py +1 -3
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +1 -3
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +1 -3
- mirascope/api/_generated/types/issue_tag.py +1 -8
- mirascope/api/_generated/types/not_found_error_body.py +1 -3
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +1 -3
- mirascope/api/_generated/types/permission_denied_error_tag.py +1 -3
- mirascope/api/_generated/types/property_key_key.py +1 -3
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/llm/__init__.py +6 -2
- mirascope/llm/content/tool_call.py +6 -0
- mirascope/llm/exceptions.py +28 -0
- mirascope/llm/formatting/__init__.py +2 -2
- mirascope/llm/formatting/format.py +120 -8
- mirascope/llm/formatting/types.py +1 -56
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/providers/__init__.py +26 -5
- mirascope/llm/providers/anthropic/__init__.py +3 -21
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +4 -2
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +13 -12
- mirascope/llm/providers/anthropic/_utils/decode.py +4 -2
- mirascope/llm/providers/anthropic/_utils/encode.py +57 -14
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +6 -0
- mirascope/llm/providers/anthropic/provider.py +5 -0
- mirascope/llm/providers/base/__init__.py +5 -2
- mirascope/llm/providers/base/_utils.py +2 -7
- mirascope/llm/providers/base/base_provider.py +173 -58
- mirascope/llm/providers/base/params.py +63 -34
- mirascope/llm/providers/google/__init__.py +2 -17
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +17 -8
- mirascope/llm/providers/google/_utils/encode.py +105 -16
- mirascope/llm/providers/google/_utils/errors.py +49 -0
- mirascope/llm/providers/google/model_info.py +1 -0
- mirascope/llm/providers/google/provider.py +9 -5
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +77 -0
- mirascope/llm/providers/mirascope/provider.py +318 -0
- mirascope/llm/providers/mlx/__init__.py +2 -17
- mirascope/llm/providers/mlx/_utils.py +9 -2
- mirascope/llm/providers/mlx/provider.py +8 -0
- mirascope/llm/providers/ollama/__init__.py +1 -13
- mirascope/llm/providers/openai/__init__.py +10 -1
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +2 -20
- mirascope/llm/providers/openai/completions/_utils/decode.py +14 -3
- mirascope/llm/providers/openai/completions/_utils/encode.py +15 -12
- mirascope/llm/providers/openai/completions/base_provider.py +6 -6
- mirascope/llm/providers/openai/provider.py +14 -1
- mirascope/llm/providers/openai/responses/__init__.py +1 -17
- mirascope/llm/providers/openai/responses/_utils/decode.py +2 -2
- mirascope/llm/providers/openai/responses/_utils/encode.py +43 -15
- mirascope/llm/providers/openai/responses/provider.py +13 -7
- mirascope/llm/providers/provider_id.py +1 -0
- mirascope/llm/providers/provider_registry.py +59 -3
- mirascope/llm/providers/together/__init__.py +1 -13
- mirascope/llm/responses/base_stream_response.py +24 -20
- mirascope/llm/tools/decorator.py +8 -4
- mirascope/llm/tools/tool_schema.py +33 -6
- mirascope/llm/tools/tools.py +84 -16
- mirascope/ops/__init__.py +60 -109
- mirascope/ops/_internal/closure.py +62 -11
- mirascope/ops/_internal/instrumentation/llm/llm.py +1 -2
- mirascope/ops/_internal/traced_functions.py +23 -4
- mirascope/ops/_internal/versioned_functions.py +54 -43
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/METADATA +7 -7
- mirascope-2.0.0a6.dist-info/RECORD +316 -0
- mirascope/llm/formatting/_utils.py +0 -78
- mirascope/llm/mcp/client.py +0 -118
- mirascope/llm/providers/_missing_import_stubs.py +0 -49
- mirascope/llm/providers/load_provider.py +0 -54
- mirascope-2.0.0a4.dist-info/RECORD +0 -247
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/WHEEL +0 -0
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/licenses/LICENSE +0 -0
|
@@ -14,17 +14,105 @@ from ....exceptions import FeatureNotSupportedError
|
|
|
14
14
|
from ....formatting import (
|
|
15
15
|
Format,
|
|
16
16
|
FormattableT,
|
|
17
|
-
_utils as _formatting_utils,
|
|
18
17
|
resolve_format,
|
|
19
18
|
)
|
|
20
19
|
from ....messages import AssistantMessage, Message, UserMessage
|
|
21
20
|
from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
|
|
22
|
-
from ...base import Params, _utils as _base_utils
|
|
21
|
+
from ...base import Params, ThinkingConfig, ThinkingLevel, _utils as _base_utils
|
|
23
22
|
from ..model_id import GoogleModelId, model_name
|
|
24
23
|
from ..model_info import MODELS_WITHOUT_STRUCTURED_OUTPUT_AND_TOOLS_SUPPORT
|
|
25
24
|
|
|
26
25
|
UNKNOWN_TOOL_ID = "google_unknown_tool_id"
|
|
27
26
|
|
|
27
|
+
# Thinking level to a float multiplier % of max tokens (for 2.5 models using budget)
|
|
28
|
+
THINKING_LEVEL_TO_BUDGET_MULTIPLIER: dict[ThinkingLevel, float] = {
|
|
29
|
+
"none": 0,
|
|
30
|
+
"minimal": 0.1,
|
|
31
|
+
"low": 0.2,
|
|
32
|
+
"medium": 0.4,
|
|
33
|
+
"high": 0.6,
|
|
34
|
+
"max": 0.8,
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
# Gemini 3 Pro supports only LOW or HIGH
|
|
38
|
+
# https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
|
|
39
|
+
THINKING_LEVEL_FOR_GEMINI_3_PRO: dict[ThinkingLevel, genai_types.ThinkingLevel] = {
|
|
40
|
+
"default": genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
|
|
41
|
+
"none": genai_types.ThinkingLevel.LOW,
|
|
42
|
+
"minimal": genai_types.ThinkingLevel.LOW,
|
|
43
|
+
"low": genai_types.ThinkingLevel.LOW,
|
|
44
|
+
"medium": genai_types.ThinkingLevel.HIGH,
|
|
45
|
+
"high": genai_types.ThinkingLevel.HIGH,
|
|
46
|
+
"max": genai_types.ThinkingLevel.HIGH,
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
# Gemini 3 Flash supports MINIMAL, LOW, MEDIUM, HIGH
|
|
50
|
+
# https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
|
|
51
|
+
THINKING_LEVEL_FOR_GEMINI_3_FLASH: dict[ThinkingLevel, genai_types.ThinkingLevel] = {
|
|
52
|
+
"default": genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
|
|
53
|
+
"none": genai_types.ThinkingLevel.MINIMAL,
|
|
54
|
+
"minimal": genai_types.ThinkingLevel.MINIMAL,
|
|
55
|
+
"low": genai_types.ThinkingLevel.LOW,
|
|
56
|
+
"medium": genai_types.ThinkingLevel.MEDIUM,
|
|
57
|
+
"high": genai_types.ThinkingLevel.HIGH,
|
|
58
|
+
"max": genai_types.ThinkingLevel.HIGH,
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def google_thinking_config(
|
|
63
|
+
thinking_config: ThinkingConfig,
|
|
64
|
+
max_tokens: int | None,
|
|
65
|
+
model_id: GoogleModelId,
|
|
66
|
+
) -> genai_types.ThinkingConfigDict:
|
|
67
|
+
"""Compute Google thinking configuration based on model version.
|
|
68
|
+
|
|
69
|
+
Args:
|
|
70
|
+
thinking_config: The ThinkingConfig from params
|
|
71
|
+
max_tokens: Max output tokens (used to compute budget for 2.5 models)
|
|
72
|
+
model_id: The Google model ID to determine version
|
|
73
|
+
|
|
74
|
+
Returns:
|
|
75
|
+
ThinkingConfigDict with either thinking_level or thinking_budget set.
|
|
76
|
+
|
|
77
|
+
Notes:
|
|
78
|
+
- Gemini 2.5 models use thinking_budget (token count)
|
|
79
|
+
- Gemini 3.0 Pro supports thinking_level "low" or "high"
|
|
80
|
+
- Gemini 3.0 Flash supports thinking_level "minimal", "low", "medium", "high"
|
|
81
|
+
|
|
82
|
+
See: https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
|
|
83
|
+
"""
|
|
84
|
+
level: ThinkingLevel = thinking_config.get("level", "default")
|
|
85
|
+
include_summaries = thinking_config.get("include_summaries")
|
|
86
|
+
|
|
87
|
+
result = genai_types.ThinkingConfigDict()
|
|
88
|
+
|
|
89
|
+
if "gemini-3-flash" in model_id:
|
|
90
|
+
result["thinking_level"] = THINKING_LEVEL_FOR_GEMINI_3_FLASH.get(
|
|
91
|
+
level, genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED
|
|
92
|
+
)
|
|
93
|
+
elif "gemini-3-pro" in model_id:
|
|
94
|
+
result["thinking_level"] = THINKING_LEVEL_FOR_GEMINI_3_PRO.get(
|
|
95
|
+
level, genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED
|
|
96
|
+
)
|
|
97
|
+
else: # Fall back to 2.5-style budgets
|
|
98
|
+
# 2.5 models use thinking_budget
|
|
99
|
+
if level == "default":
|
|
100
|
+
budget = -1 # Dynamic budget
|
|
101
|
+
elif level == "none":
|
|
102
|
+
budget = 0 # Disable thinking
|
|
103
|
+
else:
|
|
104
|
+
# Compute budget as percentage of max_tokens
|
|
105
|
+
if max_tokens is None:
|
|
106
|
+
max_tokens = 16000
|
|
107
|
+
multiplier = THINKING_LEVEL_TO_BUDGET_MULTIPLIER.get(level, 0.4)
|
|
108
|
+
budget = int(multiplier * max_tokens)
|
|
109
|
+
|
|
110
|
+
result["thinking_budget"] = budget
|
|
111
|
+
if include_summaries is not None:
|
|
112
|
+
result["include_thoughts"] = include_summaries
|
|
113
|
+
|
|
114
|
+
return result
|
|
115
|
+
|
|
28
116
|
|
|
29
117
|
class GoogleKwargs(TypedDict, total=False):
|
|
30
118
|
"""Kwargs for Google's generate_content method."""
|
|
@@ -187,7 +275,7 @@ def encode_request(
|
|
|
187
275
|
google_config: genai_types.GenerateContentConfigDict = (
|
|
188
276
|
genai_types.GenerateContentConfigDict()
|
|
189
277
|
)
|
|
190
|
-
|
|
278
|
+
encode_thoughts_as_text = False
|
|
191
279
|
google_model_name = model_name(model_id)
|
|
192
280
|
|
|
193
281
|
with _base_utils.ensure_all_params_accessed(
|
|
@@ -206,17 +294,16 @@ def encode_request(
|
|
|
206
294
|
if param_accessor.stop_sequences is not None:
|
|
207
295
|
google_config["stop_sequences"] = param_accessor.stop_sequences
|
|
208
296
|
if param_accessor.thinking is not None:
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
encode_thoughts = True
|
|
297
|
+
thinking_config = param_accessor.thinking
|
|
298
|
+
|
|
299
|
+
# Compute thinking config based on model version
|
|
300
|
+
google_config["thinking_config"] = google_thinking_config(
|
|
301
|
+
thinking_config, param_accessor.max_tokens, model_id
|
|
302
|
+
)
|
|
303
|
+
|
|
304
|
+
# Handle encode_thoughts_as_text from ThinkingConfig
|
|
305
|
+
if thinking_config.get("encode_thoughts_as_text"):
|
|
306
|
+
encode_thoughts_as_text = True
|
|
220
307
|
|
|
221
308
|
tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
|
|
222
309
|
google_tools: list[genai_types.ToolDict] = []
|
|
@@ -244,7 +331,7 @@ def encode_request(
|
|
|
244
331
|
google_config["response_mime_type"] = "application/json"
|
|
245
332
|
google_config["response_schema"] = format.schema
|
|
246
333
|
elif format.mode == "tool":
|
|
247
|
-
format_tool_schema =
|
|
334
|
+
format_tool_schema = format.create_tool_schema()
|
|
248
335
|
format_tool = _convert_tool_to_function_declaration(format_tool_schema)
|
|
249
336
|
google_tools.append(
|
|
250
337
|
genai_types.ToolDict(function_declarations=[format_tool])
|
|
@@ -286,7 +373,9 @@ def encode_request(
|
|
|
286
373
|
|
|
287
374
|
kwargs = GoogleKwargs(
|
|
288
375
|
model=model_name(model_id),
|
|
289
|
-
contents=_encode_messages(
|
|
376
|
+
contents=_encode_messages(
|
|
377
|
+
remaining_messages, model_id, encode_thoughts_as_text
|
|
378
|
+
),
|
|
290
379
|
config=google_config,
|
|
291
380
|
)
|
|
292
381
|
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
"""Google error handling utilities."""
|
|
2
|
+
|
|
3
|
+
from google.genai.errors import (
|
|
4
|
+
ClientError as GoogleClientError,
|
|
5
|
+
ServerError as GoogleServerError,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
from ....exceptions import (
|
|
9
|
+
APIError,
|
|
10
|
+
AuthenticationError,
|
|
11
|
+
BadRequestError,
|
|
12
|
+
NotFoundError,
|
|
13
|
+
PermissionError,
|
|
14
|
+
RateLimitError,
|
|
15
|
+
ServerError,
|
|
16
|
+
)
|
|
17
|
+
from ...base import ProviderErrorMap
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def map_google_error(e: Exception) -> type[APIError]:
|
|
21
|
+
"""Map Google error to appropriate Mirascope error type.
|
|
22
|
+
|
|
23
|
+
Google only provides ClientError (4xx) and ServerError (5xx) with status codes,
|
|
24
|
+
so we map based on status code and message patterns.
|
|
25
|
+
"""
|
|
26
|
+
if not isinstance(e, GoogleClientError | GoogleServerError):
|
|
27
|
+
return APIError
|
|
28
|
+
|
|
29
|
+
# Authentication errors (401) or 400 with "API key not valid"
|
|
30
|
+
if e.code == 401 or (e.code == 400 and "API key not valid" in str(e)):
|
|
31
|
+
return AuthenticationError
|
|
32
|
+
if e.code == 403:
|
|
33
|
+
return PermissionError
|
|
34
|
+
if e.code == 404:
|
|
35
|
+
return NotFoundError
|
|
36
|
+
if e.code == 429:
|
|
37
|
+
return RateLimitError
|
|
38
|
+
if e.code in (400, 422):
|
|
39
|
+
return BadRequestError
|
|
40
|
+
if isinstance(e, GoogleServerError) and e.code >= 500:
|
|
41
|
+
return ServerError
|
|
42
|
+
return APIError
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# Shared error mapping for Google provider
|
|
46
|
+
GOOGLE_ERROR_MAP: ProviderErrorMap = {
|
|
47
|
+
GoogleClientError: map_google_error,
|
|
48
|
+
GoogleServerError: map_google_error,
|
|
49
|
+
}
|
|
@@ -21,6 +21,7 @@ GoogleKnownModels = Literal[
|
|
|
21
21
|
"google/gemini-2.5-flash-lite-preview-09-2025",
|
|
22
22
|
"google/gemini-2.5-flash-preview-09-2025",
|
|
23
23
|
"google/gemini-2.5-pro",
|
|
24
|
+
"google/gemini-3-flash-preview",
|
|
24
25
|
"google/gemini-3-pro-image-preview",
|
|
25
26
|
"google/gemini-3-pro-preview",
|
|
26
27
|
"google/gemini-flash-latest",
|
|
@@ -39,6 +39,7 @@ class GoogleProvider(BaseProvider[Client]):
|
|
|
39
39
|
|
|
40
40
|
id = "google"
|
|
41
41
|
default_scope = "google/"
|
|
42
|
+
error_map = _utils.GOOGLE_ERROR_MAP
|
|
42
43
|
|
|
43
44
|
def __init__(
|
|
44
45
|
self, *, api_key: str | None = None, base_url: str | None = None
|
|
@@ -46,10 +47,17 @@ class GoogleProvider(BaseProvider[Client]):
|
|
|
46
47
|
"""Initialize the Google client."""
|
|
47
48
|
http_options = None
|
|
48
49
|
if base_url:
|
|
49
|
-
http_options = HttpOptions(
|
|
50
|
+
http_options = HttpOptions(
|
|
51
|
+
base_url=base_url,
|
|
52
|
+
headers={"Authorization": f"Bearer {api_key}"},
|
|
53
|
+
)
|
|
50
54
|
|
|
51
55
|
self.client = Client(api_key=api_key, http_options=http_options)
|
|
52
56
|
|
|
57
|
+
def get_error_status(self, e: Exception) -> int | None:
|
|
58
|
+
"""Extract HTTP status code from Google exception."""
|
|
59
|
+
return getattr(e, "code", None)
|
|
60
|
+
|
|
53
61
|
def _call(
|
|
54
62
|
self,
|
|
55
63
|
*,
|
|
@@ -78,7 +86,6 @@ class GoogleProvider(BaseProvider[Client]):
|
|
|
78
86
|
format=format,
|
|
79
87
|
params=params,
|
|
80
88
|
)
|
|
81
|
-
|
|
82
89
|
google_response = self.client.models.generate_content(**kwargs)
|
|
83
90
|
|
|
84
91
|
assistant_message, finish_reason, usage = _utils.decode_response(
|
|
@@ -131,7 +138,6 @@ class GoogleProvider(BaseProvider[Client]):
|
|
|
131
138
|
format=format,
|
|
132
139
|
params=params,
|
|
133
140
|
)
|
|
134
|
-
|
|
135
141
|
google_response = self.client.models.generate_content(**kwargs)
|
|
136
142
|
|
|
137
143
|
assistant_message, finish_reason, usage = _utils.decode_response(
|
|
@@ -180,7 +186,6 @@ class GoogleProvider(BaseProvider[Client]):
|
|
|
180
186
|
format=format,
|
|
181
187
|
params=params,
|
|
182
188
|
)
|
|
183
|
-
|
|
184
189
|
google_response = await self.client.aio.models.generate_content(**kwargs)
|
|
185
190
|
|
|
186
191
|
assistant_message, finish_reason, usage = _utils.decode_response(
|
|
@@ -233,7 +238,6 @@ class GoogleProvider(BaseProvider[Client]):
|
|
|
233
238
|
format=format,
|
|
234
239
|
params=params,
|
|
235
240
|
)
|
|
236
|
-
|
|
237
241
|
google_response = await self.client.aio.models.generate_content(**kwargs)
|
|
238
242
|
|
|
239
243
|
assistant_message, finish_reason, usage = _utils.decode_response(
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""Utility functions for Mirascope Router provider."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import cast
|
|
5
|
+
|
|
6
|
+
from ..base import Provider
|
|
7
|
+
from ..provider_id import ProviderId
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def extract_provider_prefix(model_id: str) -> str | None:
|
|
11
|
+
"""Extract provider prefix from model ID.
|
|
12
|
+
|
|
13
|
+
Args:
|
|
14
|
+
model_id: Model identifier in the format "provider/model-name"
|
|
15
|
+
e.g., "openai/gpt-4", "anthropic/claude-3", "google/gemini-pro"
|
|
16
|
+
|
|
17
|
+
Returns:
|
|
18
|
+
The provider prefix (e.g., "openai", "anthropic", "google") or None if invalid format.
|
|
19
|
+
"""
|
|
20
|
+
if "/" not in model_id:
|
|
21
|
+
return None
|
|
22
|
+
return model_id.split("/", 1)[0]
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def get_default_router_base_url() -> str:
|
|
26
|
+
"""Get the default router base URL from environment or use default.
|
|
27
|
+
|
|
28
|
+
Returns:
|
|
29
|
+
The router base URL (without trailing provider path).
|
|
30
|
+
"""
|
|
31
|
+
return os.environ.get(
|
|
32
|
+
"MIRASCOPE_ROUTER_BASE_URL", "https://mirascope.com/router/v0"
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def create_underlying_provider(
|
|
37
|
+
provider_prefix: str, api_key: str, router_base_url: str
|
|
38
|
+
) -> Provider:
|
|
39
|
+
"""Create and cache an underlying provider instance using provider_singleton.
|
|
40
|
+
|
|
41
|
+
This function constructs the appropriate router URL for the provider and
|
|
42
|
+
delegates to provider_singleton for caching and instantiation.
|
|
43
|
+
|
|
44
|
+
Args:
|
|
45
|
+
provider_prefix: The provider name (e.g., "openai", "anthropic", "google",
|
|
46
|
+
"openai:completions", "openai:responses")
|
|
47
|
+
api_key: The API key to use for authentication
|
|
48
|
+
router_base_url: The base router URL (e.g., "http://mirascope.com/router/v0")
|
|
49
|
+
|
|
50
|
+
Returns:
|
|
51
|
+
A cached provider instance configured for the Mirascope Router.
|
|
52
|
+
|
|
53
|
+
Raises:
|
|
54
|
+
ValueError: If the provider is unsupported.
|
|
55
|
+
"""
|
|
56
|
+
# Extract base provider name (handles variants like "openai:completions")
|
|
57
|
+
base_provider = provider_prefix.split(":")[0]
|
|
58
|
+
|
|
59
|
+
if base_provider not in ["anthropic", "google", "openai"]:
|
|
60
|
+
raise ValueError(
|
|
61
|
+
f"Unsupported provider: {provider_prefix}. "
|
|
62
|
+
f"Mirascope Router currently supports: anthropic, google, openai"
|
|
63
|
+
)
|
|
64
|
+
|
|
65
|
+
base_url = f"{router_base_url}/{base_provider}"
|
|
66
|
+
if base_provider == "openai": # OpenAI expects /v1, which their SDK doesn't add
|
|
67
|
+
base_url = f"{base_url}/v1"
|
|
68
|
+
|
|
69
|
+
# Lazy import to avoid circular dependencies
|
|
70
|
+
from ..provider_registry import provider_singleton
|
|
71
|
+
|
|
72
|
+
# Use provider_singleton which provides caching
|
|
73
|
+
return provider_singleton(
|
|
74
|
+
cast(ProviderId, provider_prefix),
|
|
75
|
+
api_key=api_key,
|
|
76
|
+
base_url=base_url,
|
|
77
|
+
)
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
"""Mirascope Router provider that routes requests through the Mirascope Router API."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from collections.abc import Sequence
|
|
5
|
+
from typing_extensions import Unpack
|
|
6
|
+
|
|
7
|
+
from ...context import Context, DepsT
|
|
8
|
+
from ...formatting import Format, FormattableT
|
|
9
|
+
from ...messages import Message
|
|
10
|
+
from ...responses import (
|
|
11
|
+
AsyncContextResponse,
|
|
12
|
+
AsyncContextStreamResponse,
|
|
13
|
+
AsyncResponse,
|
|
14
|
+
AsyncStreamResponse,
|
|
15
|
+
ContextResponse,
|
|
16
|
+
ContextStreamResponse,
|
|
17
|
+
Response,
|
|
18
|
+
StreamResponse,
|
|
19
|
+
)
|
|
20
|
+
from ...tools import (
|
|
21
|
+
AsyncContextTool,
|
|
22
|
+
AsyncContextToolkit,
|
|
23
|
+
AsyncTool,
|
|
24
|
+
AsyncToolkit,
|
|
25
|
+
ContextTool,
|
|
26
|
+
ContextToolkit,
|
|
27
|
+
Tool,
|
|
28
|
+
Toolkit,
|
|
29
|
+
)
|
|
30
|
+
from ..base import BaseProvider, Params, Provider
|
|
31
|
+
from . import _utils
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class MirascopeProvider(BaseProvider[None]):
    """Provider that routes LLM requests through the Mirascope Router API.

    The Mirascope Router provides a unified API for multiple LLM providers
    (Anthropic, Google, OpenAI) with usage tracking and cost calculation.

    This provider:
    - Takes model IDs in the format "provider/model-name" (e.g., "openai/gpt-4")
    - Routes requests to the Mirascope Router endpoint
    - Delegates to the appropriate underlying provider (Anthropic, Google, or OpenAI)
    - Uses MIRASCOPE_API_KEY for authentication

    Environment Variables:
        MIRASCOPE_API_KEY: Required API key for Mirascope Router authentication
        MIRASCOPE_ROUTER_BASE_URL: Optional base URL override (default: https://mirascope.com/router/v0)

    Example:
        ```python
        import os
        from mirascope import llm

        os.environ["MIRASCOPE_API_KEY"] = "mk..."

        # Register the Mirascope provider
        llm.register_provider(
            "mirascope",
            scope=["anthropic/", "google/", "openai/"],
        )

        # Use with llm.call decorator
        @llm.call("openai/gpt-4")
        def recommend_book(genre: str):
            return f"Recommend a {genre} book"

        response = recommend_book("fantasy")
        print(response.content)
        ```
    """

    id = "mirascope"
    default_scope = ["anthropic/", "google/", "openai/"]
    error_map = {}
    """Empty error map since MirascopeProvider delegates to underlying providers.

    Error handling is performed by the underlying provider instances (Anthropic,
    Google, OpenAI), which have their own error maps. Any exceptions that bubble
    up from underlying providers are already converted to Mirascope exceptions.
    """

    def __init__(
        self, *, api_key: str | None = None, base_url: str | None = None
    ) -> None:
        """Initialize the Mirascope provider.

        Args:
            api_key: Mirascope API key. If not provided, reads from MIRASCOPE_API_KEY
                environment variable.
            base_url: Optional base URL override for the Mirascope Router. If not
                provided, reads from MIRASCOPE_ROUTER_BASE_URL environment variable
                or defaults to https://mirascope.com/router/v0

        Raises:
            ValueError: If no API key is provided and MIRASCOPE_API_KEY is unset.
        """
        api_key = api_key or os.environ.get("MIRASCOPE_API_KEY")
        if not api_key:
            raise ValueError(
                "Mirascope API key not found. "
                "Set MIRASCOPE_API_KEY environment variable or pass api_key parameter."
            )

        self.api_key = api_key
        self.router_base_url = base_url or _utils.get_default_router_base_url()
        self.client = None  # No single client; we create per-provider clients

    def get_error_status(self, e: Exception) -> int | None:
        """Extract HTTP status code from exception.

        Since MirascopeProvider delegates to underlying providers, this method
        is not used for direct error extraction. Underlying providers handle
        their own status code extraction.

        Args:
            e: The exception to extract status code from.

        Returns:
            None, as status extraction is handled by underlying providers.
        """
        return None

    def _get_underlying_provider(self, model_id: str) -> Provider:
        """Get the underlying provider for a model ID.

        Args:
            model_id: Model identifier in format "provider/model-name"

        Returns:
            The appropriate cached provider instance (Anthropic, Google, or OpenAI)

        Raises:
            ValueError: If the model ID format is invalid or provider is unsupported
        """
        provider_prefix = _utils.extract_provider_prefix(model_id)
        if not provider_prefix:
            # Second literal has no placeholders, so it needs no f-prefix;
            # implicit concatenation keeps the message identical.
            raise ValueError(
                f"Invalid model ID format: {model_id}. "
                "Expected format 'provider/model-name' (e.g., 'openai/gpt-4')"
            )

        # Use the cached function to get/create the provider
        return _utils.create_underlying_provider(
            provider_prefix=provider_prefix,
            api_key=self.api_key,
            router_base_url=self.router_base_url,
        )

    def _call(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool] | Toolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> Response | Response[FormattableT]:
        """Generate an `llm.Response` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return provider.call(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    def _context_call(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return provider.context_call(
            ctx=ctx,
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    async def _call_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generate an `llm.AsyncResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return await provider.call_async(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    async def _context_call_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generate an `llm.AsyncContextResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return await provider.context_call_async(
            ctx=ctx,
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    def _stream(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool] | Toolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Stream an `llm.StreamResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return provider.stream(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    def _context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Stream an `llm.ContextStreamResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return provider.context_stream(
            ctx=ctx,
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    async def _stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Stream an `llm.AsyncStreamResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return await provider.stream_async(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )

    async def _context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Stream an `llm.AsyncContextStreamResponse` by calling through the Mirascope Router."""
        provider = self._get_underlying_provider(model_id)
        return await provider.context_stream_async(
            ctx=ctx,
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            **params,
        )
|