mirascope 2.0.0a4__py3-none-any.whl → 2.0.0a6__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- mirascope/__init__.py +10 -1
- mirascope/_stubs.py +363 -0
- mirascope/api/__init__.py +8 -0
- mirascope/api/_generated/__init__.py +119 -1
- mirascope/api/_generated/annotations/__init__.py +33 -0
- mirascope/api/_generated/annotations/client.py +474 -0
- mirascope/api/_generated/annotations/raw_client.py +1095 -0
- mirascope/api/_generated/annotations/types/__init__.py +31 -0
- mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_create_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_get_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
- mirascope/api/_generated/annotations/types/annotations_update_response.py +35 -0
- mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
- mirascope/api/_generated/api_keys/__init__.py +7 -0
- mirascope/api/_generated/api_keys/client.py +429 -0
- mirascope/api/_generated/api_keys/raw_client.py +788 -0
- mirascope/api/_generated/api_keys/types/__init__.py +9 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
- mirascope/api/_generated/client.py +12 -0
- mirascope/api/_generated/core/client_wrapper.py +2 -14
- mirascope/api/_generated/core/datetime_utils.py +1 -3
- mirascope/api/_generated/core/file.py +2 -5
- mirascope/api/_generated/core/http_client.py +36 -112
- mirascope/api/_generated/core/jsonable_encoder.py +1 -3
- mirascope/api/_generated/core/pydantic_utilities.py +19 -74
- mirascope/api/_generated/core/query_encoder.py +1 -3
- mirascope/api/_generated/core/serialization.py +4 -10
- mirascope/api/_generated/docs/client.py +2 -6
- mirascope/api/_generated/docs/raw_client.py +4 -20
- mirascope/api/_generated/environments/__init__.py +17 -0
- mirascope/api/_generated/environments/client.py +500 -0
- mirascope/api/_generated/environments/raw_client.py +999 -0
- mirascope/api/_generated/environments/types/__init__.py +15 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
- mirascope/api/_generated/errors/__init__.py +2 -0
- mirascope/api/_generated/errors/bad_request_error.py +1 -5
- mirascope/api/_generated/errors/conflict_error.py +1 -5
- mirascope/api/_generated/errors/forbidden_error.py +1 -5
- mirascope/api/_generated/errors/internal_server_error.py +1 -6
- mirascope/api/_generated/errors/not_found_error.py +1 -5
- mirascope/api/_generated/errors/unauthorized_error.py +11 -0
- mirascope/api/_generated/functions/__init__.py +29 -0
- mirascope/api/_generated/functions/client.py +433 -0
- mirascope/api/_generated/functions/raw_client.py +1049 -0
- mirascope/api/_generated/functions/types/__init__.py +29 -0
- mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
- mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
- mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
- mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
- mirascope/api/_generated/health/client.py +2 -6
- mirascope/api/_generated/health/raw_client.py +5 -23
- mirascope/api/_generated/health/types/health_check_response.py +1 -3
- mirascope/api/_generated/organizations/__init__.py +2 -0
- mirascope/api/_generated/organizations/client.py +94 -27
- mirascope/api/_generated/organizations/raw_client.py +246 -128
- mirascope/api/_generated/organizations/types/__init__.py +2 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_credits_response.py +19 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +1 -3
- mirascope/api/_generated/organizations/types/organizations_update_response.py +5 -3
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +1 -3
- mirascope/api/_generated/projects/__init__.py +2 -12
- mirascope/api/_generated/projects/client.py +38 -68
- mirascope/api/_generated/projects/raw_client.py +92 -163
- mirascope/api/_generated/projects/types/__init__.py +1 -6
- mirascope/api/_generated/projects/types/projects_create_response.py +4 -9
- mirascope/api/_generated/projects/types/projects_get_response.py +4 -9
- mirascope/api/_generated/projects/types/projects_list_response_item.py +4 -9
- mirascope/api/_generated/projects/types/projects_update_response.py +4 -9
- mirascope/api/_generated/reference.md +1862 -70
- mirascope/api/_generated/traces/__init__.py +22 -0
- mirascope/api/_generated/traces/client.py +398 -0
- mirascope/api/_generated/traces/raw_client.py +902 -18
- mirascope/api/_generated/traces/types/__init__.py +32 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +4 -11
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +4 -8
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +6 -18
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +3 -9
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +8 -24
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +1 -3
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +2 -6
- mirascope/api/_generated/traces/types/traces_create_response.py +2 -5
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +3 -9
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +54 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +90 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
- mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +41 -0
- mirascope/api/_generated/types/__init__.py +18 -0
- mirascope/api/_generated/types/already_exists_error.py +1 -3
- mirascope/api/_generated/types/click_house_error.py +22 -0
- mirascope/api/_generated/types/database_error.py +1 -3
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/internal_server_error_body.py +49 -0
- mirascope/api/_generated/types/issue.py +1 -3
- mirascope/api/_generated/types/issue_tag.py +1 -8
- mirascope/api/_generated/types/not_found_error_body.py +1 -3
- mirascope/api/_generated/types/number_from_string.py +3 -0
- mirascope/api/_generated/types/permission_denied_error.py +1 -3
- mirascope/api/_generated/types/permission_denied_error_tag.py +1 -3
- mirascope/api/_generated/types/property_key_key.py +1 -3
- mirascope/api/_generated/types/stripe_error.py +20 -0
- mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
- mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
- mirascope/llm/__init__.py +6 -2
- mirascope/llm/content/tool_call.py +6 -0
- mirascope/llm/exceptions.py +28 -0
- mirascope/llm/formatting/__init__.py +2 -2
- mirascope/llm/formatting/format.py +120 -8
- mirascope/llm/formatting/types.py +1 -56
- mirascope/llm/mcp/__init__.py +2 -2
- mirascope/llm/mcp/mcp_client.py +130 -0
- mirascope/llm/providers/__init__.py +26 -5
- mirascope/llm/providers/anthropic/__init__.py +3 -21
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +4 -2
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +13 -12
- mirascope/llm/providers/anthropic/_utils/decode.py +4 -2
- mirascope/llm/providers/anthropic/_utils/encode.py +57 -14
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +6 -0
- mirascope/llm/providers/anthropic/provider.py +5 -0
- mirascope/llm/providers/base/__init__.py +5 -2
- mirascope/llm/providers/base/_utils.py +2 -7
- mirascope/llm/providers/base/base_provider.py +173 -58
- mirascope/llm/providers/base/params.py +63 -34
- mirascope/llm/providers/google/__init__.py +2 -17
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +17 -8
- mirascope/llm/providers/google/_utils/encode.py +105 -16
- mirascope/llm/providers/google/_utils/errors.py +49 -0
- mirascope/llm/providers/google/model_info.py +1 -0
- mirascope/llm/providers/google/provider.py +9 -5
- mirascope/llm/providers/mirascope/__init__.py +5 -0
- mirascope/llm/providers/mirascope/_utils.py +77 -0
- mirascope/llm/providers/mirascope/provider.py +318 -0
- mirascope/llm/providers/mlx/__init__.py +2 -17
- mirascope/llm/providers/mlx/_utils.py +9 -2
- mirascope/llm/providers/mlx/provider.py +8 -0
- mirascope/llm/providers/ollama/__init__.py +1 -13
- mirascope/llm/providers/openai/__init__.py +10 -1
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +2 -20
- mirascope/llm/providers/openai/completions/_utils/decode.py +14 -3
- mirascope/llm/providers/openai/completions/_utils/encode.py +15 -12
- mirascope/llm/providers/openai/completions/base_provider.py +6 -6
- mirascope/llm/providers/openai/provider.py +14 -1
- mirascope/llm/providers/openai/responses/__init__.py +1 -17
- mirascope/llm/providers/openai/responses/_utils/decode.py +2 -2
- mirascope/llm/providers/openai/responses/_utils/encode.py +43 -15
- mirascope/llm/providers/openai/responses/provider.py +13 -7
- mirascope/llm/providers/provider_id.py +1 -0
- mirascope/llm/providers/provider_registry.py +59 -3
- mirascope/llm/providers/together/__init__.py +1 -13
- mirascope/llm/responses/base_stream_response.py +24 -20
- mirascope/llm/tools/decorator.py +8 -4
- mirascope/llm/tools/tool_schema.py +33 -6
- mirascope/llm/tools/tools.py +84 -16
- mirascope/ops/__init__.py +60 -109
- mirascope/ops/_internal/closure.py +62 -11
- mirascope/ops/_internal/instrumentation/llm/llm.py +1 -2
- mirascope/ops/_internal/traced_functions.py +23 -4
- mirascope/ops/_internal/versioned_functions.py +54 -43
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/METADATA +7 -7
- mirascope-2.0.0a6.dist-info/RECORD +316 -0
- mirascope/llm/formatting/_utils.py +0 -78
- mirascope/llm/mcp/client.py +0 -118
- mirascope/llm/providers/_missing_import_stubs.py +0 -49
- mirascope/llm/providers/load_provider.py +0 -54
- mirascope-2.0.0a4.dist-info/RECORD +0 -247
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/WHEEL +0 -0
- {mirascope-2.0.0a4.dist-info → mirascope-2.0.0a6.dist-info}/licenses/LICENSE +0 -0
@@ -21,7 +21,6 @@ from ....exceptions import FormattingModeNotSupportedError
 from ....formatting import (
     Format,
     FormattableT,
-    _utils as _formatting_utils,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
@@ -60,11 +59,11 @@ class BetaParseKwargs(TypedDict, total=False):

 def _beta_encode_content(
     content: Sequence[ContentPart],
-
+    encode_thoughts_as_text: bool,
     add_cache_control: bool = False,
 ) -> str | Sequence[BetaContentBlockParam]:
     """Convert mirascope content to Beta Anthropic content format."""
-    result = encode_content(content,
+    result = encode_content(content, encode_thoughts_as_text, add_cache_control)
     if isinstance(result, str):
         return result
     return cast(Sequence[BetaContentBlockParam], result)
@@ -73,7 +72,7 @@ def _beta_encode_content(
 def _beta_encode_message(
     message: UserMessage | AssistantMessage,
     model_id: str,
-
+    encode_thoughts_as_text: bool,
     add_cache_control: bool = False,
 ) -> BetaMessageParam:
     """Convert user or assistant Message to Beta MessageParam format.
@@ -81,7 +80,7 @@ def _beta_encode_message(
     Args:
         message: The message to encode
         model_id: The Anthropic model ID
-
+        encode_thoughts_as_text: Whether to encode thought blocks as text
         add_cache_control: Whether to add cache_control to the last content block
     """
     if (
@@ -89,7 +88,7 @@ def _beta_encode_message(
         and message.provider_id == "anthropic"
         and message.model_id == model_id
         and message.raw_message
-        and not
+        and not encode_thoughts_as_text
         and not add_cache_control
     ):
         raw = cast(dict[str, Any], message.raw_message)
@@ -98,7 +97,9 @@ def _beta_encode_message(
             content=raw["content"],
         )

-    content = _beta_encode_content(
+    content = _beta_encode_content(
+        message.content, encode_thoughts_as_text, add_cache_control
+    )

     return BetaMessageParam(
         role=message.role,
@@ -109,7 +110,7 @@ def _beta_encode_message(
 def _beta_encode_messages(
     messages: Sequence[UserMessage | AssistantMessage],
     model_id: str,
-
+    encode_thoughts_as_text: bool,
 ) -> Sequence[BetaMessageParam]:
     """Encode messages and add cache control for multi-turn conversations.

@@ -125,7 +126,7 @@ def _beta_encode_messages(
         is_last = i == len(messages) - 1
         add_cache = has_assistant_message and is_last
         encoded_messages.append(
-            _beta_encode_message(message, model_id,
+            _beta_encode_message(message, model_id, encode_thoughts_as_text, add_cache)
         )
     return encoded_messages

@@ -146,7 +147,7 @@ def beta_encode_request(
     """Prepares a request for the Anthropic beta.messages.parse method."""

     processed = process_params(params, DEFAULT_MAX_TOKENS)
-
+    encode_thoughts_as_text = processed.pop("encode_thoughts_as_text", False)
     max_tokens = processed.pop("max_tokens", DEFAULT_MAX_TOKENS)

     kwargs: BetaParseKwargs = BetaParseKwargs(
@@ -174,7 +175,7 @@ def beta_encode_request(
         kwargs["output_format"] = cast(type[BaseModel], format.formattable)

     if format.mode == "tool":
-        format_tool_schema =
+        format_tool_schema = format.create_tool_schema()
         anthropic_tools.append(_beta_convert_tool_to_tool_param(format_tool_schema))
         if tools:
             kwargs["tool_choice"] = {"type": "any"}
@@ -201,7 +202,7 @@ def beta_encode_request(
     )

     kwargs["messages"] = _beta_encode_messages(
-        remaining_messages, model_id,
+        remaining_messages, model_id, encode_thoughts_as_text
     )

     if system_message_content:
@@ -180,7 +180,9 @@ class _AnthropicChunkProcessor:
                     f"Received input_json_delta for {self.current_block_param['type']} block"
                 )
             self.accumulated_tool_json += delta.partial_json
-            yield ToolCallChunk(
+            yield ToolCallChunk(
+                id=self.current_block_param["id"], delta=delta.partial_json
+            )
         elif delta.type == "thinking_delta":
             if self.current_block_param["type"] != "thinking":  # pragma: no cover
                 raise RuntimeError(
@@ -217,7 +219,7 @@ class _AnthropicChunkProcessor:
                 if self.accumulated_tool_json
                 else {}
             )
-            yield ToolCallEndChunk()
+            yield ToolCallEndChunk(id=self.current_block_param["id"])
         elif block_type == "thinking":
             yield ThoughtEndChunk()
         else:
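Both chunk types now carry the originating content block's id, which matters once a streamed response interleaves more than one tool call. A hypothetical consumer sketch only: the chunk classes' import path and attribute access are assumed from the constructor calls above, and the stream argument stands in for whatever chunk iterator the caller already has.

from collections import defaultdict

def collect_tool_call_args(stream) -> dict[str, str]:
    """Illustrative only: accumulate streamed tool-call JSON keyed by tool-call id."""
    partial: dict[str, str] = defaultdict(str)
    completed: dict[str, str] = {}
    for chunk in stream:
        if isinstance(chunk, ToolCallChunk):        # assumed importable from mirascope.llm
            partial[chunk.id] += chunk.delta        # per-id accumulation, enabled by this change
        elif isinstance(chunk, ToolCallEndChunk):
            completed[chunk.id] = partial.pop(chunk.id)
    return completed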
@@ -13,18 +13,26 @@ from ....exceptions import FeatureNotSupportedError, FormattingModeNotSupportedE
 from ....formatting import (
     Format,
     FormattableT,
-    _utils as _formatting_utils,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
 from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
-from ...base import Params, _utils as _base_utils
+from ...base import Params, ThinkingLevel, _utils as _base_utils
 from ..model_id import AnthropicModelId, model_name

 DEFAULT_MAX_TOKENS = 16000
 # TODO: Change DEFAULT_FORMAT_MODE to strict when strict is no longer a beta feature.
 DEFAULT_FORMAT_MODE = "tool"

+# Thinking level to a float multiplier % of max tokens
+THINKING_LEVEL_TO_BUDGET_MULTIPLIER: dict[ThinkingLevel, float] = {
+    "minimal": 0,  # Will become 1024 (actual minimal value)
+    "low": 0.2,
+    "medium": 0.4,
+    "high": 0.6,
+    "max": 0.8,
+}
+
 AnthropicImageMimeType = Literal["image/jpeg", "image/png", "image/gif", "image/webp"]


@@ -37,6 +45,30 @@ def encode_image_mime_type(mime_type: ImageMimeType) -> AnthropicImageMimeType:
     )  # pragma: no cover


+def compute_thinking_budget(
+    level: ThinkingLevel,
+    max_tokens: int,
+) -> int:
+    """Compute Anthropic token budget from ThinkingConfig level.
+
+    Args:
+        level: The thinking level from ThinkingConfig
+        max_tokens: The max_tokens value for the request
+
+    Returns:
+        Token budget for thinking (0 to disable, positive for budget)
+    """
+
+    if level == "none":
+        return 0
+    elif level == "default":
+        return -1  # Do not set thinking, leave to provider default
+
+    multiplier: float = THINKING_LEVEL_TO_BUDGET_MULTIPLIER.get(level, 0.4)
+    budget = int(multiplier * max_tokens)
+    return max(1024, budget)  # Always return at least 1024, minimum allowed budget
+
+
 class ProcessedParams(TypedDict, total=False):
     """Common parameters processed from Params."""

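These additions (the counts line up with the +57/-14 change to mirascope/llm/providers/anthropic/_utils/encode.py in the file list) turn a ThinkingLevel into an Anthropic thinking token budget as a fraction of max_tokens. A quick worked check of the arithmetic at the module's DEFAULT_MAX_TOKENS of 16000; the values are computed from the code above, not measured output:

compute_thinking_budget("none", 16_000)     # 0  -> thinking explicitly disabled
compute_thinking_budget("default", 16_000)  # -1 -> leave thinking unset (provider default)
compute_thinking_budget("minimal", 16_000)  # 0 * 16000, floored up to the 1024 minimum -> 1024
compute_thinking_budget("low", 16_000)      # 0.2 * 16000 -> 3200
compute_thinking_budget("medium", 16_000)   # 0.4 * 16000 -> 6400
compute_thinking_budget("high", 16_000)     # 0.6 * 16000 -> roughly 9600 (int() truncates the float product)
compute_thinking_budget("max", 16_000)      # 0.8 * 16000 -> 12800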
@@ -46,7 +78,7 @@ class ProcessedParams(TypedDict, total=False):
     top_k: int
     stop_sequences: list[str]
     thinking: dict[str, Any]
-
+    encode_thoughts_as_text: bool


 def process_params(params: Params, default_max_tokens: int) -> ProcessedParams:
@@ -56,7 +88,7 @@ def process_params(params: Params, default_max_tokens: int) -> ProcessedParams:
     """
     result: ProcessedParams = {
         "max_tokens": default_max_tokens,
-        "
+        "encode_thoughts_as_text": False,
     }

     with _base_utils.ensure_all_params_accessed(
@@ -73,13 +105,22 @@ def process_params(params: Params, default_max_tokens: int) -> ProcessedParams:
         if param_accessor.stop_sequences is not None:
             result["stop_sequences"] = param_accessor.stop_sequences
         if param_accessor.thinking is not None:
-
-
+            thinking_config = param_accessor.thinking
+            level = thinking_config.get("level")
+
+            # Compute token budget from level
+            budget_tokens = compute_thinking_budget(level, result["max_tokens"])
+            if budget_tokens == 0:
+                result["thinking"] = {"type": "disabled"}
+            elif budget_tokens > 0:
                 result["thinking"] = {"type": "enabled", "budget_tokens": budget_tokens}
             else:
-
-
-
+                # budget is -1, do not set thinking at all.
+                pass
+
+            # Handle encode_thoughts_as_text from ThinkingConfig
+            if thinking_config.get("encode_thoughts_as_text"):
+                result["encode_thoughts_as_text"] = True

     return result

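With this hunk the encode_thoughts_as_text flag is read from the thinking config instead of from its own param (the standalone accessor is deleted at the end of this diff). A rough sketch of the resulting behavior, assuming ThinkingConfig is the dict-like config implied by the .get() calls above:

params: Params = {"thinking": {"level": "low", "encode_thoughts_as_text": True}}
processed = process_params(params, default_max_tokens=16_000)
# processed["thinking"]                -> {"type": "enabled", "budget_tokens": 3200}
# processed["encode_thoughts_as_text"] -> True
# With {"level": "none"} the thinking dict becomes {"type": "disabled"};
# with {"level": "default"} no "thinking" key is set at all.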
@@ -254,7 +295,7 @@ def _encode_message(
 def _encode_messages(
     messages: Sequence[UserMessage | AssistantMessage],
     model_id: AnthropicModelId,
-
+    encode_thoughts_as_text: bool,
 ) -> Sequence[anthropic_types.MessageParam]:
     """Encode messages and add cache control for multi-turn conversations.

@@ -270,7 +311,7 @@ def _encode_messages(
         is_last = i == len(messages) - 1
         add_cache = has_assistant_message and is_last
         encoded_messages.append(
-            _encode_message(message, model_id,
+            _encode_message(message, model_id, encode_thoughts_as_text, add_cache)
         )
     return encoded_messages

@@ -298,7 +339,7 @@ def encode_request(
     """Prepares a request for the Anthropic messages.create method."""

     processed = process_params(params, DEFAULT_MAX_TOKENS)
-
+    encode_thoughts_as_text = processed.pop("encode_thoughts_as_text", False)
     max_tokens = processed.pop("max_tokens", DEFAULT_MAX_TOKENS)

     kwargs: MessageCreateKwargs = MessageCreateKwargs(
@@ -316,7 +357,7 @@ def encode_request(
         model_id=model_id,
     )
     if format.mode == "tool":
-        format_tool_schema =
+        format_tool_schema = format.create_tool_schema()
         anthropic_tools.append(convert_tool_to_tool_param(format_tool_schema))
         if tools:
             kwargs["tool_choice"] = {"type": "any"}
@@ -342,7 +383,9 @@ def encode_request(
         messages
     )

-    kwargs["messages"] = _encode_messages(
+    kwargs["messages"] = _encode_messages(
+        remaining_messages, model_id, encode_thoughts_as_text
+    )

     if system_message_content:
         kwargs["system"] = [
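These hunks are the messages.create counterpart of the beta path above: the same encode_thoughts_as_text pop, the same format.create_tool_schema() call, and _encode_messages now takes the flag explicitly. A quick illustration of the parameter flow using only what the hunks show; how the remaining processed entries reach MessageCreateKwargs is presumed, not shown here:

processed = process_params({"thinking": {"level": "medium"}}, DEFAULT_MAX_TOKENS)
encode_thoughts_as_text = processed.pop("encode_thoughts_as_text", False)  # False here
max_tokens = processed.pop("max_tokens", DEFAULT_MAX_TOKENS)               # 16000
# processed still carries thinking={"type": "enabled", "budget_tokens": 6400},
# which presumably flows into MessageCreateKwargs alongside max_tokens.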
@@ -0,0 +1,46 @@
+"""Anthropic error handling utilities."""
+
+from anthropic import (
+    AnthropicError,
+    APIConnectionError as AnthropicAPIConnectionError,
+    APIResponseValidationError as AnthropicAPIResponseValidationError,
+    APITimeoutError as AnthropicAPITimeoutError,
+    AuthenticationError as AnthropicAuthenticationError,
+    BadRequestError as AnthropicBadRequestError,
+    ConflictError as AnthropicConflictError,
+    InternalServerError as AnthropicInternalServerError,
+    NotFoundError as AnthropicNotFoundError,
+    PermissionDeniedError as AnthropicPermissionDeniedError,
+    RateLimitError as AnthropicRateLimitError,
+    UnprocessableEntityError as AnthropicUnprocessableEntityError,
+)
+
+from ....exceptions import (
+    APIError,
+    AuthenticationError,
+    BadRequestError,
+    ConnectionError,
+    NotFoundError,
+    PermissionError,
+    RateLimitError,
+    ResponseValidationError,
+    ServerError,
+    TimeoutError,
+)
+from ...base import ProviderErrorMap
+
+# Shared error mapping used by both AnthropicProvider and AnthropicBetaProvider
+ANTHROPIC_ERROR_MAP: ProviderErrorMap = {
+    AnthropicAuthenticationError: AuthenticationError,
+    AnthropicPermissionDeniedError: PermissionError,
+    AnthropicBadRequestError: BadRequestError,
+    AnthropicUnprocessableEntityError: BadRequestError,
+    AnthropicNotFoundError: NotFoundError,
+    AnthropicConflictError: BadRequestError,
+    AnthropicRateLimitError: RateLimitError,
+    AnthropicInternalServerError: ServerError,
+    AnthropicAPITimeoutError: TimeoutError,
+    AnthropicAPIConnectionError: ConnectionError,
+    AnthropicAPIResponseValidationError: ResponseValidationError,
+    AnthropicError: APIError,  # Catch-all for unknown Anthropic errors
+}
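This new 46-line module matches the anthropic/_utils/errors.py entry in the file list and centralizes the SDK-to-mirascope exception mapping. How the map is consumed lives in base_provider.py (+173/-58), which is not part of this diff excerpt, so the helper below is only a hypothetical sketch of the lookup, not the shipped implementation:

def lookup_mirascope_error(
    error: Exception, error_map: dict[type[Exception], type[Exception]]
) -> type[Exception] | None:
    """Hypothetical: return the mapped mirascope exception type for an SDK exception."""
    for sdk_error_type, mirascope_error_type in error_map.items():
        if isinstance(error, sdk_error_type):
            # Specific SDK errors precede the AnthropicError catch-all in the map,
            # so dict insertion order resolves the most specific match first.
            return mirascope_error_type
    return None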
@@ -29,6 +29,7 @@ from ...tools import (
     Toolkit,
 )
 from ..base import BaseProvider, Params
+from . import _utils
 from ._utils import beta_decode, beta_encode
 from .model_id import model_name

@@ -38,6 +39,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):

     id = "anthropic-beta"
     default_scope = "anthropic-beta/"
+    error_map = _utils.ANTHROPIC_ERROR_MAP

     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
@@ -46,6 +48,10 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         self.client = Anthropic(api_key=api_key, base_url=base_url)
         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)

+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from Anthropic exception."""
+        return getattr(e, "status_code", None)
+
     def _call(
         self,
         *,
@@ -55,6 +55,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):

     id = "anthropic"
     default_scope = "anthropic/"
+    error_map = _utils.ANTHROPIC_ERROR_MAP
     _beta_provider: AnthropicBetaProvider

     def __init__(
@@ -65,6 +66,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)
         self._beta_provider = AnthropicBetaProvider(api_key=api_key, base_url=base_url)

+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from Anthropic exception."""
+        return getattr(e, "status_code", None)
+
     def _call(
         self,
         *,
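With error_map and get_error_status wired into both provider classes, callers can presumably catch mirascope's provider-agnostic exception types (mirascope/llm/exceptions.py in the file list, and the names imported by errors.py above) instead of anthropic SDK exceptions. A minimal, illustrative sketch; the actual call site is elided because it is not part of this diff:

from mirascope.llm.exceptions import RateLimitError, ServerError

try:
    ...  # a call routed through AnthropicProvider / AnthropicBetaProvider
except (RateLimitError, ServerError):
    ...  # back off and retry without importing anthropic's own exception classes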
@@ -1,9 +1,9 @@
 """Base client interfaces and types."""

 from . import _utils
-from .base_provider import BaseProvider, Provider
+from .base_provider import BaseProvider, Provider, ProviderErrorMap
 from .kwargs import BaseKwargs, KwargsT
-from .params import Params
+from .params import Params, ThinkingConfig, ThinkingLevel

 __all__ = [
     "BaseKwargs",
@@ -11,5 +11,8 @@ __all__ = [
     "KwargsT",
     "Params",
     "Provider",
+    "ProviderErrorMap",
+    "ThinkingConfig",
+    "ThinkingLevel",
     "_utils",
 ]
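The base package now re-exports the new error-map and thinking types. Illustrative import only; the module path follows the package layout in the file list, and the ThinkingConfig shape is assumed from the .get("level") / .get("encode_thoughts_as_text") calls in encode.py rather than shown here:

from mirascope.llm.providers.base import Params, ThinkingConfig, ThinkingLevel

level: ThinkingLevel = "high"
thinking: ThinkingConfig = {"level": level, "encode_thoughts_as_text": False}
params: Params = {"thinking": thinking}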
@@ -10,6 +10,7 @@ from .params import Params

 if TYPE_CHECKING:
     from ..model_id import ModelId
+    from .params import ThinkingConfig

 logger = logging.getLogger(__name__)

@@ -138,17 +139,11 @@ class SafeParamsAccessor:
         return self._params.get("stop_sequences")

     @property
-    def thinking(self) ->
+    def thinking(self) -> "ThinkingConfig | None":
         """Access the thinking parameter."""
         self._unaccessed.discard("thinking")
         return self._params.get("thinking")

-    @property
-    def encode_thoughts_as_text(self) -> bool | None:
-        """Access the encode_thoughts_as_text parameter."""
-        self._unaccessed.discard("encode_thoughts_as_text")
-        return self._params.get("encode_thoughts_as_text")
-
     def emit_warning_for_unused_param(
         self,
         param_name: str,