mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +85 -0
- mirascope/api/_generated/client.py +155 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +7 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/reference.md +167 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +21 -0
- mirascope/api/_generated/types/http_api_decode_error.py +31 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +44 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_tag.py +29 -0
- mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +41 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +38 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +24 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
- mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
- mirascope/llm/providers/anthropic/model_id.py +40 -0
- mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +10 -7
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
- mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
- mirascope/llm/providers/google/model_id.py +28 -0
- mirascope/llm/providers/google/provider.py +438 -0
- mirascope/llm/providers/load_provider.py +48 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +107 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +411 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +20 -0
- mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
- mirascope/llm/providers/openai/completions/provider.py +456 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +246 -0
- mirascope/llm/providers/openai/provider.py +386 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
- mirascope/llm/providers/openai/responses/provider.py +470 -0
- mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
- mirascope/llm/providers/provider_id.py +13 -0
- mirascope/llm/providers/provider_registry.py +167 -0
- mirascope/llm/responses/base_response.py +10 -5
- mirascope/llm/responses/base_stream_response.py +10 -5
- mirascope/llm/responses/response.py +24 -13
- mirascope/llm/responses/root_response.py +7 -12
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +10 -9
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +76 -1
- mirascope-2.0.0a3.dist-info/RECORD +206 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a2.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
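
The headline change in this release is that `mirascope.llm.clients` becomes `mirascope.llm.providers`: the per-client modules move under `providers/`, `BaseClient` is replaced by `BaseProvider`, and the hunks below show the Anthropic client rewritten accordingly. As orientation before the raw hunks, here is a toy sketch of the new shape; it is illustrative only (the dict-based response, the `call` signature, and the `removeprefix` model-name logic are assumptions, not mirascope's actual code):

```python
from abc import ABC, abstractmethod
from typing import Any


class BaseProvider(ABC):
    """Stand-in for the BaseProvider in providers/base/base_provider.py."""

    id: str             # class-level metadata, e.g. "anthropic"
    default_scope: str  # e.g. "anthropic/"

    def call(self, *, model_id: str, **params: Any) -> dict[str, Any]:
        # Public entry point; the overloaded call/stream variants that each
        # a2 client duplicated are consolidated behind the base class, which
        # dispatches to private per-provider hooks (_call, _stream, ...).
        return self._call(model_id=model_id, **params)

    @abstractmethod
    def _call(self, *, model_id: str, **params: Any) -> dict[str, Any]: ...


class AnthropicProvider(BaseProvider):
    id = "anthropic"
    default_scope = "anthropic/"

    def _call(self, *, model_id: str, **params: Any) -> dict[str, Any]:
        # Mirrors the Response(...) construction in the hunks below: a3
        # responses now carry provider_id and a resolved provider_model_name.
        return {
            "raw": "...",
            "provider_id": self.id,
            "model_id": model_id,
            "provider_model_name": model_id.removeprefix(self.default_scope),
        }


response = AnthropicProvider().call(model_id="anthropic/example-model")
print(response["provider_id"], response["provider_model_name"])
```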
mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py}

@@ -1,10 +1,6 @@
 """Anthropic client implementation."""

-import os
 from collections.abc import Sequence
-from contextvars import ContextVar
-from functools import lru_cache
-from typing import overload
 from typing_extensions import Unpack

 from anthropic import Anthropic, AsyncAnthropic
@@ -32,60 +28,16 @@ from ...tools import (
     Tool,
     Toolkit,
 )
-from ..base import BaseClient, Params
+from ..base import BaseProvider, Params
 from . import _utils
-from .model_ids import AnthropicModelId
+from .model_id import AnthropicModelId, model_name

-ANTHROPIC_CLIENT_CONTEXT: ContextVar["AnthropicClient | None"] = ContextVar(
-    "ANTHROPIC_CLIENT_CONTEXT", default=None
-)
-
-
-@lru_cache(maxsize=256)
-def _anthropic_singleton(
-    api_key: str | None, base_url: str | None
-) -> "AnthropicClient":
-    """Return a cached Anthropic client instance for the given parameters."""
-    return AnthropicClient(api_key=api_key, base_url=base_url)
-
-
-def client(
-    *, api_key: str | None = None, base_url: str | None = None
-) -> "AnthropicClient":
-    """Create or retrieve an Anthropic client with the given parameters.
-
-    If a client has already been created with these parameters, it will be
-    retrieved from cache and returned.
-
-    Args:
-        api_key: API key for authentication. If None, uses ANTHROPIC_API_KEY env var.
-        base_url: Base URL for the API. If None, uses ANTHROPIC_BASE_URL env var.

-    Returns:
-        An Anthropic client instance.
-    """
-    api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
-    base_url = base_url or os.getenv("ANTHROPIC_BASE_URL")
-    return _anthropic_singleton(api_key, base_url)
-
-
-def get_client() -> "AnthropicClient":
-    """Retrieve the current Anthropic client from context, or a global default.
-
-    Returns:
-        The current Anthropic client from context if available, otherwise
-        a global default client based on environment variables.
-    """
-    ctx_client = ANTHROPIC_CLIENT_CONTEXT.get()
-    return ctx_client or client()
-
-
-class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
+class AnthropicProvider(BaseProvider[Anthropic]):
     """The client for the Anthropic LLM model."""

-
-
-        return ANTHROPIC_CLIENT_CONTEXT
+    id = "anthropic"
+    default_scope = "anthropic/"

     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
@@ -94,46 +46,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
         self.client = Anthropic(api_key=api_key, base_url=base_url)
         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)

-    @overload
-    def call(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> Response:
-        """Generate an `llm.Response` without a response format."""
-        ...
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> Response[FormattableT]:
-        """Generate an `llm.Response` with a response format."""
-        ...
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` with an optional response format."""
-        ...
-
-    def call(
+    def _call(
         self,
         *,
         model_id: AnthropicModelId,
@@ -170,8 +83,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):

         return Response(
             raw=anthropic_response,
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -180,55 +94,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, None]:
-        """Generate an `llm.ContextResponse` without a response format."""
-        ...
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` with a response format."""
-        ...
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` with an optional response format."""
-        ...
-
-    def context_call(
+    def _context_call(
         self,
         *,
         ctx: Context[DepsT],
@@ -269,8 +135,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):

         return ContextResponse(
             raw=anthropic_response,
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -279,46 +146,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse:
-        """Generate an `llm.AsyncResponse` without a response format."""
-        ...
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` with a response format."""
-        ...
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` with an optional response format."""
-        ...
-
-    async def call_async(
+    async def _call_async(
         self,
         *,
         model_id: AnthropicModelId,
@@ -355,8 +183,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):

         return AsyncResponse(
             raw=anthropic_response,
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -365,55 +194,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, None]:
-        """Generate an `llm.AsyncContextResponse` without a response format."""
-        ...
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` with a response format."""
-        ...
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` with an optional response format."""
-        ...
-
-    async def context_call_async(
+    async def _context_call_async(
         self,
         *,
         ctx: Context[DepsT],
@@ -454,8 +235,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):

         return AsyncContextResponse(
             raw=anthropic_response,
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -464,46 +246,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    def stream(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse:
-        """Stream an `llm.StreamResponse` without a response format."""
-        ...
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> StreamResponse[FormattableT]:
-        """Stream an `llm.StreamResponse` with a response format."""
-        ...
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Stream an `llm.StreamResponse` with an optional response format."""
-        ...
-
-    def stream(
+    def _stream(
         self,
         *,
         model_id: AnthropicModelId,
@@ -537,8 +280,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
         chunk_iterator = _utils.decode_stream(anthropic_stream)

         return StreamResponse(
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -546,55 +290,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT]:
-        """Stream an `llm.ContextStreamResponse` without a response format."""
-        ...
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.ContextStreamResponse` with a response format."""
-        ...
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.ContextStreamResponse` with an optional response format."""
-        ...
-
-    def context_stream(
+    def _context_stream(
         self,
         *,
         ctx: Context[DepsT],
@@ -632,8 +328,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
         chunk_iterator = _utils.decode_stream(anthropic_stream)

         return ContextStreamResponse(
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -641,46 +338,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse:
-        """Stream an `llm.AsyncStreamResponse` without a response format."""
-        ...
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse[FormattableT]:
-        """Stream an `llm.AsyncStreamResponse` with a response format."""
-        ...
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Stream an `llm.AsyncStreamResponse` with an optional response format."""
-        ...
-
-    async def stream_async(
+    async def _stream_async(
         self,
         *,
         model_id: AnthropicModelId,
@@ -714,8 +372,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
         chunk_iterator = _utils.decode_async_stream(anthropic_stream)

         return AsyncStreamResponse(
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
@@ -723,55 +382,7 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
             format=format,
         )

-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse[DepsT]:
-        """Stream an `llm.AsyncContextStreamResponse` without a response format."""
-        ...
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.AsyncContextStreamResponse` with a response format."""
-        ...
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: AnthropicModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
-        ...
-
-    async def context_stream_async(
+    async def _context_stream_async(
         self,
         *,
         ctx: Context[DepsT],
@@ -782,7 +393,10 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
         | None = None,
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
-    ) ->
+    ) -> (
+        AsyncContextStreamResponse[DepsT]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API.

         Args:
@@ -809,8 +423,9 @@ class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
         chunk_iterator = _utils.decode_async_stream(anthropic_stream)

         return AsyncContextStreamResponse(
-
+            provider_id="anthropic",
             model_id=model_id,
+            provider_model_name=model_name(model_id),
             params=params,
             tools=tools,
             input_messages=input_messages,
mirascope/llm/{clients → providers}/base/__init__.py

@@ -1,15 +1,15 @@
 """Base client interfaces and types."""

 from . import _utils
-from .client import BaseClient
+from .base_provider import BaseProvider, Provider
 from .kwargs import BaseKwargs, KwargsT
 from .params import Params

 __all__ = [
-    "BaseClient",
     "BaseKwargs",
-    "
+    "BaseProvider",
     "KwargsT",
     "Params",
+    "Provider",
     "_utils",
 ]

mirascope/llm/{clients → providers}/base/_utils.py

@@ -5,10 +5,11 @@ from typing import TYPE_CHECKING, TypeAlias, get_type_hints

 from ...content import Text
 from ...messages import AssistantMessage, Message, SystemMessage, UserMessage
+from ..provider_id import ProviderId
 from .params import Params

 if TYPE_CHECKING:
-    from ..
+    from ..model_id import ModelId

 logger = logging.getLogger(__name__)

@@ -61,8 +62,8 @@ def extract_system_message(
     This is intended for use in clients where the system message is not included in the
     input messages, but passed as an additional argument or metadata.
     """
-    system_message_content = None
-    remaining_messages = []
+    system_message_content: SystemMessageContent = None
+    remaining_messages: list[UserMessage | AssistantMessage] = []

     for i, message in enumerate(messages):
         if message.role == "system":
@@ -138,10 +139,10 @@ class SafeParamsAccessor:
         self,
         param_name: str,
         param_value: object,
-
+        provider_id: "ProviderId",
         model_id: "ModelId | None" = None,
     ) -> None:
-        unsupported_by = f"provider: {
+        unsupported_by = f"provider: {provider_id}"
         if model_id:
             unsupported_by += f" with model_id: {model_id}"
         logger.warning(
@@ -159,7 +160,7 @@
 def ensure_all_params_accessed(
     *,
     params: Params,
-
+    provider_id: "ProviderId",
     unsupported_params: list[str] | None = None,
 ) -> Generator[SafeParamsAccessor, None, None]:
     """Context manager that ensures all parameters are accessed.
@@ -185,7 +186,9 @@ def ensure_all_params_accessed(
     unsupported_params = unsupported_params or []
     for unsupported in unsupported_params:
         if (val := params.get(unsupported)) is not None:
-            accessor.emit_warning_for_unused_param(
+            accessor.emit_warning_for_unused_param(
+                unsupported, val, provider_id=provider_id
+            )
     try:
         yield accessor
     finally:
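
The `base/_utils.py` hunks above thread a `provider_id` through the unsupported-parameter warning path, so warnings can name the provider (and optionally the model) that ignored a parameter. Below is a self-contained sketch of that context-manager pattern, with the accessor simplified to a plain dict and every other name hypothetical:

```python
import logging
from collections.abc import Generator
from contextlib import contextmanager

logger = logging.getLogger(__name__)


@contextmanager
def ensure_all_params_accessed(
    *,
    params: dict[str, object],
    provider_id: str,  # the diff passes this explicitly into the warning path
    unsupported_params: list[str] | None = None,
) -> Generator[dict[str, object], None, None]:
    # Warn up front about parameters this provider cannot honor, mirroring
    # the emit_warning_for_unused_param(..., provider_id=...) call above.
    for name in unsupported_params or []:
        if (value := params.get(name)) is not None:
            logger.warning(
                "param %r (=%r) is unsupported by provider: %s",
                name, value, provider_id,
            )
    yield params


with ensure_all_params_accessed(
    params={"temperature": 0.2, "top_k": 40},
    provider_id="anthropic",
    unsupported_params=["top_k"],
) as accessor:
    accessor.get("temperature")  # normal parameter access proceeds as usual
```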