mirascope 2.0.0a1__py3-none-any.whl → 2.0.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +85 -0
- mirascope/api/_generated/client.py +155 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +7 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/reference.md +167 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +21 -0
- mirascope/api/_generated/types/http_api_decode_error.py +31 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +44 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_tag.py +29 -0
- mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +41 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +38 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +24 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
- mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
- mirascope/llm/providers/anthropic/model_id.py +40 -0
- mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +10 -7
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
- mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
- mirascope/llm/providers/google/model_id.py +28 -0
- mirascope/llm/providers/google/provider.py +438 -0
- mirascope/llm/providers/load_provider.py +48 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +107 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +411 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +20 -0
- mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
- mirascope/llm/providers/openai/completions/provider.py +456 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +246 -0
- mirascope/llm/providers/openai/provider.py +386 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
- mirascope/llm/providers/openai/responses/provider.py +470 -0
- mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
- mirascope/llm/providers/provider_id.py +13 -0
- mirascope/llm/providers/provider_registry.py +167 -0
- mirascope/llm/responses/base_response.py +10 -5
- mirascope/llm/responses/base_stream_response.py +10 -5
- mirascope/llm/responses/response.py +24 -13
- mirascope/llm/responses/root_response.py +7 -12
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/decorator.py +10 -10
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +12 -11
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +77 -1
- mirascope-2.0.0a3.dist-info/RECORD +206 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a1.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/google/__init__.py
@@ -0,0 +1,21 @@
+"""Google client implementation."""
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from .model_id import GoogleModelId
+    from .provider import GoogleProvider
+else:
+    try:
+        from .model_id import GoogleModelId
+        from .provider import GoogleProvider
+    except ImportError:  # pragma: no cover
+        from .._missing_import_stubs import (
+            create_import_error_stub,
+            create_provider_stub,
+        )
+
+        GoogleProvider = create_provider_stub("google", "GoogleProvider")
+        GoogleModelId = str
+
+__all__ = ["GoogleModelId", "GoogleProvider"]
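The `TYPE_CHECKING` / `try` / `except ImportError` pattern above lets `mirascope.llm.providers.google` import cleanly even when the optional Google dependencies are missing: type checkers see the real classes, while at runtime a stub is substituted that only fails when actually used. A minimal sketch of that behavior, assuming a plausible body for `create_provider_stub` (the real implementation lives in `_missing_import_stubs.py`, which is not part of this diff):

```python
# Hypothetical sketch of create_provider_stub; the actual implementation
# in _missing_import_stubs.py is not shown in this diff.
def create_provider_stub(provider_id: str, class_name: str) -> type:
    class _MissingProviderStub:
        def __init__(self, *args: object, **kwargs: object) -> None:
            # Importing the subpackage succeeds; only instantiation fails.
            raise ImportError(
                f"{class_name} requires the optional '{provider_id}' "
                "dependencies to be installed."
            )

    _MissingProviderStub.__name__ = class_name
    return _MissingProviderStub
```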
mirascope/llm/{clients → providers}/google/_utils/decode.py
@@ -30,7 +30,7 @@ from ....responses import (
     RawMessageChunk,
     RawStreamEventChunk,
 )
-from ..
+from ..model_id import GoogleModelId, model_name
 from .encode import UNKNOWN_TOOL_ID
 
 GOOGLE_FINISH_REASON_MAP = {
@@ -88,7 +88,7 @@ def _decode_candidate_content(
     candidate: genai_types.Candidate,
 ) -> Sequence[AssistantContentPart]:
     """Returns a sequence of `AssistantContentPart` decoded from a google `Candidate`"""
-    content_parts = []
+    content_parts: list[AssistantContentPart] = []
     if candidate.content and candidate.content.parts:
         for part in candidate.content.parts:
             decoded_part = _decode_content_part(part)
@@ -98,7 +98,8 @@ def _decode_candidate_content(
 
 
 def decode_response(
-    response: genai_types.GenerateContentResponse,
+    response: genai_types.GenerateContentResponse,
+    model_id: GoogleModelId,
 ) -> tuple[AssistantMessage, FinishReason | None]:
     """Returns an `AssistantMessage` and `FinishReason` extracted from a `GenerateContentResponse`"""
     content: Sequence[AssistantContentPart] = []
@@ -115,8 +116,9 @@ def decode_response(
 
     assistant_message = AssistantMessage(
         content=content,
-
+        provider_id="google",
         model_id=model_id,
+        provider_model_name=model_name(model_id),
         raw_message=candidate_content.model_dump(),
     )
 
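The decode changes stamp richer provenance onto decoded messages: in addition to `model_id` (the Mirascope-registered, provider-prefixed id), the `AssistantMessage` now carries `provider_id="google"` and `provider_model_name` (the name the GenAI API itself uses). Roughly, using only the field names visible in the hunk above:

```python
# Illustrative only; values show the relationship between the new fields.
model_id = "google/gemini-2.5-flash"                     # Mirascope-registered id
provider_model_name = model_id.removeprefix("google/")   # "gemini-2.5-flash"

# decode_response(response, model_id) now builds, roughly:
#   AssistantMessage(
#       content=...,
#       provider_id="google",
#       model_id=model_id,
#       provider_model_name=provider_model_name,
#       raw_message=candidate_content.model_dump(),
#   )
```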
mirascope/llm/{clients → providers}/google/_utils/encode.py
@@ -4,7 +4,8 @@ import base64
 import json
 from collections.abc import Sequence
 from functools import lru_cache
-from typing import Any, cast
+from typing import Any, TypedDict, cast
+from typing_extensions import Required
 
 from google.genai import types as genai_types
 
@@ -17,15 +18,19 @@ from ....formatting import (
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
-from ....tools import FORMAT_TOOL_NAME,
-from ...base import
-from ..
+from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
+from ...base import Params, _utils as _base_utils
+from ..model_id import GoogleModelId, model_name
 
 UNKNOWN_TOOL_ID = "google_unknown_tool_id"
 
 
-class GoogleKwargs(
-    """Google's
+class GoogleKwargs(TypedDict, total=False):
+    """Kwargs for Google's generate_content method."""
+
+    model: Required[str]
+    contents: Required[genai_types.ContentListUnionDict]
+    config: genai_types.GenerateContentConfigDict
 
 
 def _resolve_refs(
@@ -52,7 +57,7 @@ def _encode_content(
     content: Sequence[ContentPart], encode_thoughts: bool
 ) -> list[genai_types.PartDict]:
     """Returns a list of google `PartDicts` converted from a sequence of Mirascope `ContentPart`s"""
-    result = []
+    result: list[genai_types.PartDict] = []
 
     for part in content:
         if part.type == "text":
@@ -121,7 +126,7 @@ def _encode_message(
     """Returns a Google `ContentDict` converted from a Mirascope `Message`"""
     if (
         message.role == "assistant"
-        and message.
+        and message.provider_id == "google"
         and message.model_id == model_id
         and message.raw_message
         and not encode_thoughts
@@ -144,7 +149,7 @@ def _encode_messages(
 
 @lru_cache(maxsize=128)
 def _convert_tool_to_function_declaration(
-    tool:
+    tool: AnyToolSchema,
 ) -> genai_types.FunctionDeclarationDict:
     """Convert a single Mirascope tool to Google FunctionDeclaration format with caching."""
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
@@ -170,21 +175,21 @@ def encode_request(
     *,
     model_id: GoogleModelId,
     messages: Sequence[Message],
-    tools: Sequence[
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
     format: type[FormattableT] | Format[FormattableT] | None,
     params: Params,
-) -> tuple[
-    Sequence[Message],
-    Format[FormattableT] | None,
-    genai_types.ContentListUnionDict,
-    GoogleKwargs,
-]:
+) -> tuple[Sequence[Message], Format[FormattableT] | None, GoogleKwargs]:
     """Prepares a request for the genai `Client.models.generate_content` method."""
-
+    if not model_id.startswith("google/"):  # pragma: no cover
+        raise ValueError(f"Model ID must start with 'google/' prefix, got: {model_id}")
+
+    google_config: genai_types.GenerateContentConfigDict = (
+        genai_types.GenerateContentConfigDict()
+    )
     encode_thoughts = False
 
     with _base_utils.ensure_all_params_accessed(
-        params=params,
+        params=params, provider_id="google"
     ) as param_accessor:
         if param_accessor.temperature is not None:
             google_config["temperature"] = param_accessor.temperature
@@ -224,7 +229,7 @@ def encode_request(
         if format.mode in ("strict", "json") and tools:
             raise FeatureNotSupportedError(
                 feature=f"formatting_mode:{format.mode} with tools",
-
+                provider_id="google",
             )
 
         if format.mode == "strict":
@@ -271,9 +276,10 @@ def encode_request(
     if system_message_content:
         google_config["system_instruction"] = system_message_content
 
-
-
-
-
-        google_config,
+    kwargs = GoogleKwargs(
+        model=model_name(model_id),
+        contents=_encode_messages(remaining_messages, model_id, encode_thoughts),
+        config=google_config,
     )
+
+    return messages, format, kwargs
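`encode_request` now returns a single `GoogleKwargs` TypedDict rather than a looser tuple of contents plus config, so the provider can unpack it directly into the GenAI client call. A rough sketch of what an encoded request might look like (the values are illustrative stand-ins, not output from the real encoder):

```python
# Assumes google-genai and the mirascope Google extra are installed;
# the dict values here are illustrative only.
from mirascope.llm.providers.google._utils.encode import GoogleKwargs

kwargs: GoogleKwargs = {
    "model": "gemini-2.5-flash",  # model_name(model_id), "google/" prefix stripped
    "contents": [{"role": "user", "parts": [{"text": "Hello"}]}],
    "config": {"temperature": 0.7},  # genai_types.GenerateContentConfigDict
}

# The provider then passes it straight through (see provider.py below):
#   self.client.models.generate_content(**kwargs)
```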
mirascope/llm/providers/google/model_id.py
@@ -0,0 +1,28 @@
+"""Google registered LLM models."""
+
+from typing import Literal, TypeAlias
+
+GoogleModelId: TypeAlias = (
+    Literal[
+        "google/gemini-3-pro-preview",
+        "google/gemini-2.5-pro",
+        "google/gemini-2.5-flash",
+        "google/gemini-2.5-flash-lite",
+        "google/gemini-2.0-flash",
+        "google/gemini-2.0-flash-lite",
+    ]
+    | str
+)
+"""The Google model ids registered with Mirascope."""
+
+
+def model_name(model_id: GoogleModelId) -> str:
+    """Extract the google model name from a full model ID.
+
+    Args:
+        model_id: Full model ID (e.g. "google/gemini-2.5-flash")
+
+    Returns:
+        Provider-specific model ID (e.g. "gemini-2.5-flash")
+    """
+    return model_id.removeprefix("google/")
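The `Literal[...] | str` union keeps editor completions for the registered Gemini ids while still accepting arbitrary model strings, and `model_name` strips the registry prefix before the id is handed to the GenAI API:

```python
from mirascope.llm.providers.google.model_id import GoogleModelId, model_name

registered: GoogleModelId = "google/gemini-2.5-flash"
assert model_name(registered) == "gemini-2.5-flash"

# The `| str` arm accepts ids outside the registered Literal set, but
# encode_request (above) still requires the "google/" prefix.
custom: GoogleModelId = "google/gemini-some-future-model"
assert model_name(custom) == "gemini-some-future-model"
```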
mirascope/llm/providers/google/provider.py
@@ -0,0 +1,438 @@
+"""Google provider implementation."""
+
+from collections.abc import Sequence
+from typing_extensions import Unpack
+
+from google.genai import Client
+from google.genai.types import HttpOptions
+
+from ...context import Context, DepsT
+from ...formatting import Format, FormattableT
+from ...messages import Message
+from ...responses import (
+    AsyncContextResponse,
+    AsyncContextStreamResponse,
+    AsyncResponse,
+    AsyncStreamResponse,
+    ContextResponse,
+    ContextStreamResponse,
+    Response,
+    StreamResponse,
+)
+from ...tools import (
+    AsyncContextTool,
+    AsyncContextToolkit,
+    AsyncTool,
+    AsyncToolkit,
+    ContextTool,
+    ContextToolkit,
+    Tool,
+    Toolkit,
+)
+from ..base import BaseProvider, Params
+from . import _utils
+from .model_id import GoogleModelId, model_name
+
+
+class GoogleProvider(BaseProvider[Client]):
+    """The client for the Google LLM model."""
+
+    id = "google"
+    default_scope = "google/"
+
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None
+    ) -> None:
+        """Initialize the Google client."""
+        http_options = None
+        if base_url:
+            http_options = HttpOptions(base_url=base_url)
+
+        self.client = Client(api_key=api_key, http_options=http_options)
+
+    def _call(
+        self,
+        *,
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Generate an `llm.Response` by synchronously calling the Google GenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.Response` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_response = self.client.models.generate_content(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            google_response, model_id
+        )
+
+        return Response(
+            raw=google_response,
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    def _context_call(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextResponse` by synchronously calling the Google GenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextResponse` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_response = self.client.models.generate_content(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            google_response, model_id
+        )
+
+        return ContextResponse(
+            raw=google_response,
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    async def _call_async(
+        self,
+        *,
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generate an `llm.AsyncResponse` by asynchronously calling the Google GenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncResponse` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_response = await self.client.aio.models.generate_content(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            google_response, model_id
+        )
+
+        return AsyncResponse(
+            raw=google_response,
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    async def _context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` by asynchronously calling the Google GenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextResponse` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_response = await self.client.aio.models.generate_content(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            google_response, model_id
+        )
+
+        return AsyncContextResponse(
+            raw=google_response,
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    def _stream(
+        self,
+        *,
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate an `llm.StreamResponse` by synchronously streaming from the Google GenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.StreamResponse` object for iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_stream = self.client.models.generate_content_stream(**kwargs)
+
+        chunk_iterator = _utils.decode_stream(google_stream)
+
+        return StreamResponse(
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    def _context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Google GenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_stream = self.client.models.generate_content_stream(**kwargs)
+
+        chunk_iterator = _utils.decode_stream(google_stream)
+
+        return ContextStreamResponse(
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    async def _stream_async(
+        self,
+        *,
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Google GenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_stream = await self.client.aio.models.generate_content_stream(**kwargs)
+
+        chunk_iterator = _utils.decode_async_stream(google_stream)
+
+        return AsyncStreamResponse(
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    async def _context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: GoogleModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Google GenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        google_stream = await self.client.aio.models.generate_content_stream(**kwargs)
+
+        chunk_iterator = _utils.decode_async_stream(google_stream)
+
+        return AsyncContextStreamResponse(
+            provider_id="google",
+            model_id=model_id,
+            provider_model_name=model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
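All eight request methods follow the same shape: encode the request into `GoogleKwargs`, call `generate_content` (or its streaming / `aio` variants), then decode into the matching `Response` / `StreamResponse` family. A minimal usage sketch, using only names visible in this diff (how `BaseProvider` exposes these private hooks publicly is defined in `base_provider.py`, not shown here):

```python
# Sketch; assumes google-genai is installed and an API key is available.
from mirascope.llm.providers.google import GoogleProvider

provider = GoogleProvider(api_key="...")  # base_url is optional
assert provider.id == "google"
assert provider.default_scope == "google/"

# Each method then runs the same pipeline internally:
#   1. _utils.encode_request(...)  -> (input_messages, format, GoogleKwargs)
#   2. client.models.generate_content(**kwargs)  (or *_stream / client.aio.* variants)
#   3. _utils.decode_response(...) / decode_stream(...) / decode_async_stream(...)
#   4. wrap in Response / ContextResponse / StreamResponse / Async* counterparts
```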
mirascope/llm/providers/load_provider.py
@@ -0,0 +1,48 @@
+from functools import lru_cache
+
+from .anthropic import AnthropicProvider
+from .base import Provider
+from .google import GoogleProvider
+from .mlx import MLXProvider
+from .openai import OpenAIProvider
+from .openai.completions.provider import OpenAICompletionsProvider
+from .openai.responses.provider import OpenAIResponsesProvider
+from .provider_id import ProviderId
+
+
+@lru_cache(maxsize=256)
+def load_provider(
+    provider_id: ProviderId, *, api_key: str | None = None, base_url: str | None = None
+) -> Provider:
+    """Create a cached provider instance for the specified provider id.
+
+    Args:
+        provider_id: The provider name ("openai", "anthropic", or "google").
+        api_key: API key for authentication. If None, uses provider-specific env var.
+        base_url: Base URL for the API. If None, uses provider-specific env var.
+
+    Returns:
+        A cached provider instance for the specified provider with the given parameters.
+
+    Raises:
+        ValueError: If the provider_id is not supported.
+    """
+    match provider_id:
+        case "anthropic":
+            return AnthropicProvider(api_key=api_key, base_url=base_url)
+        case "google":
+            return GoogleProvider(api_key=api_key, base_url=base_url)
+        case "openai":
+            return OpenAIProvider(api_key=api_key, base_url=base_url)
+        case "openai:completions":
+            return OpenAICompletionsProvider(api_key=api_key, base_url=base_url)
+        case "openai:responses":
+            return OpenAIResponsesProvider(api_key=api_key, base_url=base_url)
+        case "mlx":  # pragma: no cover (MLX is only available on macOS)
+            return MLXProvider()
+        case _:  # pragma: no cover
+            raise ValueError(f"Unknown provider: '{provider_id}'")
+
+
+load = load_provider
+"""Convenient alias as `llm.providers.load`"""
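Because `load_provider` is wrapped in `@lru_cache`, repeated loads with identical arguments return the same provider instance, while any change in arguments creates and caches a new one. For example (assuming the relevant provider dependencies and credentials are configured):

```python
from mirascope.llm.providers.load_provider import load_provider

a = load_provider("google")
b = load_provider("google")
assert a is b  # identical arguments hit the lru_cache entry

c = load_provider("google", api_key="different-key")
assert c is not a  # different arguments build (and cache) a separate instance
```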