mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +141 -0
- mirascope/api/_generated/client.py +163 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +17 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/errors/conflict_error.py +15 -0
- mirascope/api/_generated/errors/forbidden_error.py +15 -0
- mirascope/api/_generated/errors/internal_server_error.py +15 -0
- mirascope/api/_generated/errors/not_found_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/organizations/__init__.py +25 -0
- mirascope/api/_generated/organizations/client.py +380 -0
- mirascope/api/_generated/organizations/raw_client.py +876 -0
- mirascope/api/_generated/organizations/types/__init__.py +23 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +17 -0
- mirascope/api/_generated/projects/client.py +458 -0
- mirascope/api/_generated/projects/raw_client.py +1016 -0
- mirascope/api/_generated/projects/types/__init__.py +15 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
- mirascope/api/_generated/reference.md +753 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +37 -0
- mirascope/api/_generated/types/already_exists_error.py +24 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/database_error.py +24 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/http_api_decode_error.py +29 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +40 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/not_found_error_body.py +24 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/permission_denied_error.py +24 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_key.py +27 -0
- mirascope/api/_generated/types/property_key_key_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +45 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +44 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +29 -0
- mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
- mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
- mirascope/llm/providers/anthropic/beta_provider.py +322 -0
- mirascope/llm/providers/anthropic/model_id.py +23 -0
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +416 -0
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +25 -8
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
- mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
- mirascope/llm/providers/google/model_id.py +22 -0
- mirascope/llm/providers/google/model_info.py +62 -0
- mirascope/llm/providers/google/provider.py +442 -0
- mirascope/llm/providers/load_provider.py +54 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +129 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +415 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/ollama/__init__.py +19 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +25 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
- mirascope/llm/providers/openai/completions/base_provider.py +513 -0
- mirascope/llm/providers/openai/completions/provider.py +22 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +303 -0
- mirascope/llm/providers/openai/provider.py +398 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
- mirascope/llm/providers/openai/responses/provider.py +469 -0
- mirascope/llm/providers/provider_id.py +23 -0
- mirascope/llm/providers/provider_registry.py +169 -0
- mirascope/llm/providers/together/__init__.py +19 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +3 -0
- mirascope/llm/responses/base_response.py +14 -5
- mirascope/llm/responses/base_stream_response.py +35 -6
- mirascope/llm/responses/finish_reason.py +1 -0
- mirascope/llm/responses/response.py +33 -13
- mirascope/llm/responses/root_response.py +12 -13
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/responses/usage.py +95 -0
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +10 -9
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
- mirascope-2.0.0a4.dist-info/RECORD +247 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
- mirascope/llm/clients/anthropic/clients.py +0 -819
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/openai/shared/__init__.py +0 -7
- mirascope/llm/clients/openai/shared/_utils.py +0 -55
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a2.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
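
The three hunks below reproduce newly added files in full: the shared Anthropic encoding utilities, the beta Anthropic provider, and the Anthropic model-ID helpers. They illustrate the release's headline restructuring: the `mirascope/llm/clients` package becomes `mirascope/llm/providers`, a generated `mirascope/api` client and a `mirascope/ops` tracing/observability package are added, and the `graphs` and `agents` packages are removed.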

`mirascope/llm/providers/anthropic/_utils/encode.py` (new file, `@@ -0,0 +1,356 @@`; path inferred from the matching `+356 -0` entry in the file list above):

```python
"""Shared Anthropic encoding utilities."""

import json
from collections.abc import Sequence
from functools import lru_cache
from typing import Any, Literal, TypedDict, cast
from typing_extensions import Required

from anthropic import Omit, types as anthropic_types

from ....content import ContentPart, ImageMimeType
from ....exceptions import FeatureNotSupportedError, FormattingModeNotSupportedError
from ....formatting import (
    Format,
    FormattableT,
    _utils as _formatting_utils,
    resolve_format,
)
from ....messages import AssistantMessage, Message, UserMessage
from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
from ...base import Params, _utils as _base_utils
from ..model_id import AnthropicModelId, model_name

DEFAULT_MAX_TOKENS = 16000
# TODO: Change DEFAULT_FORMAT_MODE to strict when strict is no longer a beta feature.
DEFAULT_FORMAT_MODE = "tool"

AnthropicImageMimeType = Literal["image/jpeg", "image/png", "image/gif", "image/webp"]


def encode_image_mime_type(mime_type: ImageMimeType) -> AnthropicImageMimeType:
    """Convert an ImageMimeType into anthropic supported mime type."""
    if mime_type in ("image/jpeg", "image/png", "image/gif", "image/webp"):
        return mime_type
    raise FeatureNotSupportedError(
        feature=f"Image with mime_type: {mime_type}", provider_id="anthropic"
    )  # pragma: no cover


class ProcessedParams(TypedDict, total=False):
    """Common parameters processed from Params."""

    temperature: float
    max_tokens: int
    top_p: float
    top_k: int
    stop_sequences: list[str]
    thinking: dict[str, Any]
    encode_thoughts: bool


def process_params(params: Params, default_max_tokens: int) -> ProcessedParams:
    """Process common Anthropic parameters from Params.

    Returns a dict with processed parameters that can be merged into kwargs.
    """
    result: ProcessedParams = {
        "max_tokens": default_max_tokens,
        "encode_thoughts": False,
    }

    with _base_utils.ensure_all_params_accessed(
        params=params, provider_id="anthropic", unsupported_params=["seed"]
    ) as param_accessor:
        if param_accessor.temperature is not None:
            result["temperature"] = param_accessor.temperature
        if param_accessor.max_tokens is not None:
            result["max_tokens"] = param_accessor.max_tokens
        if param_accessor.top_p is not None:
            result["top_p"] = param_accessor.top_p
        if param_accessor.top_k is not None:
            result["top_k"] = param_accessor.top_k
        if param_accessor.stop_sequences is not None:
            result["stop_sequences"] = param_accessor.stop_sequences
        if param_accessor.thinking is not None:
            if param_accessor.thinking:
                budget_tokens = max(1024, result["max_tokens"] // 2)
                result["thinking"] = {"type": "enabled", "budget_tokens": budget_tokens}
            else:
                result["thinking"] = {"type": "disabled"}
        if param_accessor.encode_thoughts_as_text:
            result["encode_thoughts"] = True

    return result


class MessageCreateKwargs(TypedDict, total=False):
    """Kwargs for Anthropic Message.create method."""

    model: Required[str]
    max_tokens: Required[int]
    messages: Sequence[anthropic_types.MessageParam]
    system: Sequence[anthropic_types.TextBlockParam] | Omit
    tools: Sequence[anthropic_types.ToolParam] | Omit
    tool_choice: anthropic_types.ToolChoiceParam | Omit
    temperature: float | Omit
    top_p: float | Omit
    top_k: int | Omit
    stop_sequences: list[str] | Omit
    thinking: anthropic_types.ThinkingConfigParam | Omit


def encode_content(
    content: Sequence[ContentPart],
    encode_thoughts: bool,
    add_cache_control: bool,
) -> str | Sequence[anthropic_types.ContentBlockParam]:
    """Convert mirascope content to Anthropic content format."""

    if len(content) == 1 and content[0].type == "text":
        if not content[0].text:
            raise FeatureNotSupportedError(
                "empty message content",
                "anthropic",
                message="Anthropic does not support empty message content.",
            )
        if add_cache_control:
            return [
                anthropic_types.TextBlockParam(
                    type="text",
                    text=content[0].text,
                    cache_control={"type": "ephemeral"},
                )
            ]
        return content[0].text

    blocks: list[anthropic_types.ContentBlockParam] = []

    # Find the last cacheable content part (text, image, tool_result, or tool_call)
    last_cacheable_index = -1
    if add_cache_control:
        for i in range(len(content) - 1, -1, -1):
            part = content[i]
            if part.type in ("text", "image", "tool_output", "tool_call"):
                if part.type == "text" and not part.text:  # pragma: no cover
                    continue  # Skip empty text
                last_cacheable_index = i
                break

    for i, part in enumerate(content):
        should_add_cache = add_cache_control and i == last_cacheable_index

        if part.type == "text":
            if part.text:
                blocks.append(
                    anthropic_types.TextBlockParam(
                        type="text",
                        text=part.text,
                        cache_control={"type": "ephemeral"}
                        if should_add_cache
                        else None,
                    )
                )
        elif part.type == "image":
            source: (
                anthropic_types.Base64ImageSourceParam
                | anthropic_types.URLImageSourceParam
            )
            if part.source.type == "base64_image_source":
                source = anthropic_types.Base64ImageSourceParam(
                    type="base64",
                    media_type=encode_image_mime_type(part.source.mime_type),
                    data=part.source.data,
                )
            else:  # url_image_source
                source = anthropic_types.URLImageSourceParam(
                    type="url",
                    url=part.source.url,
                )
            blocks.append(
                anthropic_types.ImageBlockParam(
                    type="image",
                    source=source,
                    cache_control={"type": "ephemeral"} if should_add_cache else None,
                )
            )
        elif part.type == "audio":
            raise FeatureNotSupportedError(
                "audio input",
                "anthropic",
                message="Anthropic does not support audio inputs.",
            )
        elif part.type == "tool_output":
            blocks.append(
                anthropic_types.ToolResultBlockParam(
                    type="tool_result",
                    tool_use_id=part.id,
                    content=str(part.value),
                    cache_control={"type": "ephemeral"} if should_add_cache else None,
                )
            )
        elif part.type == "tool_call":
            blocks.append(
                anthropic_types.ToolUseBlockParam(
                    type="tool_use",
                    id=part.id,
                    name=part.name,
                    input=json.loads(part.args),
                    cache_control={"type": "ephemeral"} if should_add_cache else None,
                )
            )
        elif part.type == "thought":
            if encode_thoughts:
                blocks.append(
                    anthropic_types.TextBlockParam(
                        type="text", text="**Thinking:** " + part.thought
                    )
                )
        else:
            raise NotImplementedError(f"Unsupported content type: {part.type}")

    if not blocks:
        raise FeatureNotSupportedError(
            "empty message content",
            "anthropic",
            message="Anthropic does not support empty message content.",
        )

    return blocks


def _encode_message(
    message: UserMessage | AssistantMessage,
    model_id: AnthropicModelId,
    encode_thoughts: bool,
    add_cache_control: bool = False,
) -> anthropic_types.MessageParam:
    """Convert user or assistant Message to Anthropic MessageParam format.

    Args:
        message: The message to encode
        model_id: The Anthropic model ID
        encode_thoughts: Whether to encode thought blocks as text
        add_cache_control: Whether to add cache_control to the last content block
    """
    if (
        message.role == "assistant"
        and message.provider_id == "anthropic"
        and message.model_id == model_id
        and message.raw_message
        and not encode_thoughts
        and not add_cache_control
    ):
        return cast(anthropic_types.MessageParam, message.raw_message)

    content = encode_content(message.content, encode_thoughts, add_cache_control)

    return {
        "role": message.role,
        "content": content,
    }


def _encode_messages(
    messages: Sequence[UserMessage | AssistantMessage],
    model_id: AnthropicModelId,
    encode_thoughts: bool,
) -> Sequence[anthropic_types.MessageParam]:
    """Encode messages and add cache control for multi-turn conversations.

    If the conversation contains assistant messages (indicating multi-turn),
    adds cache_control to the last content block of the last message.
    """
    # Detect multi-turn conversations by checking for assistant messages
    has_assistant_message = any(msg.role == "assistant" for msg in messages)

    # Encode messages, adding cache_control to the last message if multi-turn
    encoded_messages: list[anthropic_types.MessageParam] = []
    for i, message in enumerate(messages):
        is_last = i == len(messages) - 1
        add_cache = has_assistant_message and is_last
        encoded_messages.append(
            _encode_message(message, model_id, encode_thoughts, add_cache)
        )
    return encoded_messages


@lru_cache(maxsize=128)
def convert_tool_to_tool_param(tool: AnyToolSchema) -> anthropic_types.ToolParam:
    """Convert a single Mirascope tool to Anthropic tool format with caching."""
    schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
    schema_dict["type"] = "object"
    return anthropic_types.ToolParam(
        name=tool.name,
        description=tool.description,
        input_schema=schema_dict,
    )


def encode_request(
    *,
    model_id: AnthropicModelId,
    messages: Sequence[Message],
    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
    format: type[FormattableT] | Format[FormattableT] | None,
    params: Params,
) -> tuple[Sequence[Message], Format[FormattableT] | None, MessageCreateKwargs]:
    """Prepares a request for the Anthropic messages.create method."""

    processed = process_params(params, DEFAULT_MAX_TOKENS)
    encode_thoughts = processed.pop("encode_thoughts", False)
    max_tokens = processed.pop("max_tokens", DEFAULT_MAX_TOKENS)

    kwargs: MessageCreateKwargs = MessageCreateKwargs(
        {"model": model_name(model_id), "max_tokens": max_tokens, **processed}
    )

    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
    anthropic_tools = [convert_tool_to_tool_param(tool) for tool in tools]
    format = resolve_format(format, default_mode=DEFAULT_FORMAT_MODE)
    if format is not None:
        if format.mode == "strict":
            raise FormattingModeNotSupportedError(
                formatting_mode="strict",
                provider_id="anthropic",
                model_id=model_id,
            )
        if format.mode == "tool":
            format_tool_schema = _formatting_utils.create_tool_schema(format)
            anthropic_tools.append(convert_tool_to_tool_param(format_tool_schema))
            if tools:
                kwargs["tool_choice"] = {"type": "any"}
            else:
                kwargs["tool_choice"] = {
                    "type": "tool",
                    "name": FORMAT_TOOL_NAME,
                    "disable_parallel_tool_use": True,
                }

        if format.formatting_instructions:
            messages = _base_utils.add_system_instructions(
                messages, format.formatting_instructions
            )

    if anthropic_tools:
        # Add cache control to the last tool for prompt caching
        last_tool = anthropic_tools[-1]
        last_tool["cache_control"] = {"type": "ephemeral"}
        kwargs["tools"] = anthropic_tools

    system_message_content, remaining_messages = _base_utils.extract_system_message(
        messages
    )

    kwargs["messages"] = _encode_messages(remaining_messages, model_id, encode_thoughts)

    if system_message_content:
        kwargs["system"] = [
            anthropic_types.TextBlockParam(
                type="text",
                text=system_message_content,
                cache_control={"type": "ephemeral"},
            )
        ]

    return messages, format, kwargs
```
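
The cache-placement rule in `encode_content` is easy to misread: only the last cacheable part (skipping empty text) receives `cache_control`, not every block, which matches Anthropic's prompt caching model of marking a prefix breakpoint rather than tagging each block. A standalone sketch of that backward scan, using plain dicts as hypothetical stand-ins for mirascope's `ContentPart` objects:

```python
# Hypothetical stand-ins for ContentPart: dicts with a "type" and optional "text".
content = [
    {"type": "text", "text": "question"},
    {"type": "image"},
    {"type": "text", "text": ""},  # empty text: never cacheable
    {"type": "thought"},           # thoughts are not cacheable either
]

CACHEABLE = ("text", "image", "tool_output", "tool_call")

# Same backward scan as encode_content: find the last cacheable, non-empty part.
last_cacheable_index = -1
for i in range(len(content) - 1, -1, -1):
    part = content[i]
    if part["type"] in CACHEABLE:
        if part["type"] == "text" and not part.get("text"):
            continue  # skip empty text, keep scanning backward
        last_cacheable_index = i
        break

assert last_cacheable_index == 1  # the image, not the empty text or the thought
```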

`mirascope/llm/providers/anthropic/beta_provider.py` (new file, `@@ -0,0 +1,322 @@`; path inferred from the matching `+322 -0` entry in the file list above):

```python
"""Beta Anthropic provider implementation."""

from collections.abc import Sequence
from typing_extensions import Unpack

from anthropic import Anthropic, AsyncAnthropic

from ...context import Context, DepsT
from ...formatting import Format, FormattableT
from ...messages import Message
from ...responses import (
    AsyncContextResponse,
    AsyncContextStreamResponse,
    AsyncResponse,
    AsyncStreamResponse,
    ContextResponse,
    ContextStreamResponse,
    Response,
    StreamResponse,
)
from ...tools import (
    AsyncContextTool,
    AsyncContextToolkit,
    AsyncTool,
    AsyncToolkit,
    ContextTool,
    ContextToolkit,
    Tool,
    Toolkit,
)
from ..base import BaseProvider, Params
from ._utils import beta_decode, beta_encode
from .model_id import model_name


class AnthropicBetaProvider(BaseProvider[Anthropic]):
    """Provider using beta Anthropic API."""

    id = "anthropic-beta"
    default_scope = "anthropic-beta/"

    def __init__(
        self, *, api_key: str | None = None, base_url: str | None = None
    ) -> None:
        """Initialize the beta Anthropic client."""
        self.client = Anthropic(api_key=api_key, base_url=base_url)
        self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)

    def _call(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool] | Toolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> Response | Response[FormattableT]:
        """Generate an `llm.Response` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_response = self.client.beta.messages.parse(**kwargs)
        assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
            beta_response, model_id
        )
        return Response(
            raw=beta_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    def _context_call(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_response = self.client.beta.messages.parse(**kwargs)
        assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
            beta_response, model_id
        )
        return ContextResponse(
            raw=beta_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    async def _call_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generate an `llm.AsyncResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_response = await self.async_client.beta.messages.parse(**kwargs)
        assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
            beta_response, model_id
        )
        return AsyncResponse(
            raw=beta_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    async def _context_call_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generate an `llm.AsyncContextResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_response = await self.async_client.beta.messages.parse(**kwargs)
        assistant_message, finish_reason, usage = beta_decode.beta_decode_response(
            beta_response, model_id
        )
        return AsyncContextResponse(
            raw=beta_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    def _stream(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool] | Toolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Generate an `llm.StreamResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_stream = self.client.beta.messages.stream(**kwargs)
        chunk_iterator = beta_decode.beta_decode_stream(beta_stream)
        return StreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )

    def _context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextStreamResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_stream = self.client.beta.messages.stream(**kwargs)
        chunk_iterator = beta_decode.beta_decode_stream(beta_stream)
        return ContextStreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )

    async def _stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Generate an `llm.AsyncStreamResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_stream = self.async_client.beta.messages.stream(**kwargs)
        chunk_iterator = beta_decode.beta_decode_async_stream(beta_stream)
        return AsyncStreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )

    async def _context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        AsyncContextStreamResponse[DepsT]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate an `llm.AsyncContextStreamResponse` using the beta Anthropic API."""
        input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        beta_stream = self.async_client.beta.messages.stream(**kwargs)
        chunk_iterator = beta_decode.beta_decode_async_stream(beta_stream)
        return AsyncContextStreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )
```
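
Every request path in `AnthropicBetaProvider` is the same three-step pipeline: `beta_encode.beta_encode_request` builds the kwargs, `client.beta.messages.parse(...)` or `.stream(...)` executes the call, and `beta_decode` converts the result back into mirascope types; the eight variants differ only in sync/async, context, and streaming. A minimal construction sketch, grounded in the `__init__` and class attributes above (the API key is a placeholder, and the underscore-prefixed methods are internal, so no request is made here):

```python
from mirascope.llm.providers.anthropic.beta_provider import AnthropicBetaProvider

# One provider instance wraps both the sync and the async Anthropic SDK client.
provider = AnthropicBetaProvider(api_key="sk-ant-placeholder", base_url=None)

assert provider.id == "anthropic-beta"
assert provider.default_scope == "anthropic-beta/"
```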

`mirascope/llm/providers/anthropic/model_id.py` (new file, `@@ -0,0 +1,23 @@`; path inferred from the matching `+23 -0` entry in the file list above):

```python
"""Anthropic registered LLM models."""

from typing import TypeAlias, get_args

from .model_info import AnthropicKnownModels

AnthropicModelId: TypeAlias = AnthropicKnownModels | str
"""The Anthropic model ids registered with Mirascope."""

ANTHROPIC_KNOWN_MODELS: set[str] = set(get_args(AnthropicKnownModels))


def model_name(model_id: AnthropicModelId) -> str:
    """Extract the anthropic model name from the ModelId

    Args:
        model_id: Full model ID (e.g. "anthropic/claude-sonnet-4-5" or
            "anthropic-beta/claude-sonnet-4-5")

    Returns:
        Provider-specific model ID (e.g. "claude-sonnet-4-5")
    """
    return model_id.removeprefix("anthropic-beta/").removeprefix("anthropic/")
```
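
Because `AnthropicModelId` is `AnthropicKnownModels | str`, `model_name` must tolerate arbitrary strings: it strips either scope prefix and passes bare model names through unchanged. A standalone check of that behavior, with the one-line body inlined so the snippet runs on its own:

```python
def model_name(model_id: str) -> str:
    # Inlined from the model_id.py hunk above.
    return model_id.removeprefix("anthropic-beta/").removeprefix("anthropic/")

assert model_name("anthropic/claude-sonnet-4-5") == "claude-sonnet-4-5"
assert model_name("anthropic-beta/claude-sonnet-4-5") == "claude-sonnet-4-5"
assert model_name("claude-sonnet-4-5") == "claude-sonnet-4-5"  # already bare
```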