mirascope 2.0.0a1__py3-none-any.whl → 2.0.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +85 -0
- mirascope/api/_generated/client.py +155 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +7 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/reference.md +167 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +21 -0
- mirascope/api/_generated/types/http_api_decode_error.py +31 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +44 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_tag.py +29 -0
- mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +41 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +38 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +24 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
- mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
- mirascope/llm/providers/anthropic/model_id.py +40 -0
- mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +10 -7
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
- mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
- mirascope/llm/providers/google/model_id.py +28 -0
- mirascope/llm/providers/google/provider.py +438 -0
- mirascope/llm/providers/load_provider.py +48 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +107 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +411 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +20 -0
- mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
- mirascope/llm/providers/openai/completions/provider.py +456 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +246 -0
- mirascope/llm/providers/openai/provider.py +386 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
- mirascope/llm/providers/openai/responses/provider.py +470 -0
- mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
- mirascope/llm/providers/provider_id.py +13 -0
- mirascope/llm/providers/provider_registry.py +167 -0
- mirascope/llm/responses/base_response.py +10 -5
- mirascope/llm/responses/base_stream_response.py +10 -5
- mirascope/llm/responses/response.py +24 -13
- mirascope/llm/responses/root_response.py +7 -12
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/decorator.py +10 -10
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +12 -11
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +77 -1
- mirascope-2.0.0a3.dist-info/RECORD +206 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a1.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
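The headline structural change in 2.0.0a3 is the move from `mirascope/llm/clients/*` to `mirascope/llm/providers/*`, with each provider's `clients.py` becoming a `provider.py`. As a rough sketch of what that means for imports (only the names re-exported by the new `providers/__init__.py` shown further below are confirmed; the old import location is inferred from the deleted `clients` modules):

```python
# 2.0.0a1 shipped provider clients under mirascope.llm.clients (modules now deleted).
# 2.0.0a3 exposes the equivalent classes under mirascope.llm.providers instead:
from mirascope.llm.providers import (
    AnthropicProvider,  # was clients/anthropic/clients.py, now providers/anthropic/provider.py
    OpenAIProvider,     # new top-level OpenAI provider alongside the completions/responses providers
    register_provider,  # provider registry hook introduced in 2.0.0a3
)
```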
mirascope/llm/prompts/prompts.py
@@ -0,0 +1,424 @@
+"""Concrete Prompt classes for generating messages with tools and formatting."""
+
+from dataclasses import dataclass
+from typing import Generic, overload
+
+from ..context import Context, DepsT
+from ..formatting import Format, FormattableT
+from ..models import Model
+from ..responses import (
+    AsyncContextResponse,
+    AsyncContextStreamResponse,
+    AsyncResponse,
+    AsyncStreamResponse,
+    ContextResponse,
+    ContextStreamResponse,
+    Response,
+    StreamResponse,
+)
+from ..tools import (
+    AsyncContextToolkit,
+    AsyncToolkit,
+    ContextToolkit,
+    Toolkit,
+)
+from ..types import P
+from . import _utils
+from .protocols import (
+    AsyncContextMessageTemplate,
+    AsyncMessageTemplate,
+    ContextMessageTemplate,
+    MessageTemplate,
+)
+
+
+@dataclass
+class Prompt(Generic[P, FormattableT]):
+    """A prompt that can be called with a model to generate a response.
+
+    Created by decorating a `MessageTemplate` with `llm.prompt`. The decorated
+    function becomes callable with a `Model` to generate LLM responses.
+
+    A `Prompt` is essentially: `MessageTemplate` + tools + format.
+    It can be invoked with a model: `prompt(model, *args, **kwargs)`.
+    """
+
+    fn: MessageTemplate[P]
+    """The underlying prompt function that generates message content."""
+
+    toolkit: Toolkit
+    """The toolkit containing this prompt's tools."""
+
+    format: type[FormattableT] | Format[FormattableT] | None
+    """The response format for the generated response."""
+
+    @overload
+    def __call__(
+        self: "Prompt[P, None]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> Response: ...
+
+    @overload
+    def __call__(
+        self: "Prompt[P, FormattableT]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> Response[FormattableT]: ...
+
+    def __call__(
+        self, model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> Response | Response[FormattableT]:
+        """Generates a response using the provided model."""
+        return self.call(model, *args, **kwargs)
+
+    @overload
+    def call(
+        self: "Prompt[P, None]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> Response: ...
+
+    @overload
+    def call(
+        self: "Prompt[P, FormattableT]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> Response[FormattableT]: ...
+
+    def call(
+        self, model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> Response | Response[FormattableT]:
+        """Generates a response using the provided model."""
+        result = self.fn(*args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return model.call(messages=messages, tools=self.toolkit, format=self.format)
+
+    @overload
+    def stream(
+        self: "Prompt[P, None]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> StreamResponse: ...
+
+    @overload
+    def stream(
+        self: "Prompt[P, FormattableT]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> StreamResponse[FormattableT]: ...
+
+    def stream(
+        self, model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generates a streaming response using the provided model."""
+        result = self.fn(*args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return model.stream(messages=messages, tools=self.toolkit, format=self.format)
+
+
+@dataclass
+class AsyncPrompt(Generic[P, FormattableT]):
+    """An async prompt that can be called with a model to generate a response.
+
+    Created by decorating an async `MessageTemplate` with `llm.prompt`. The decorated
+    async function becomes callable with a `Model` to generate LLM responses asynchronously.
+
+    An `AsyncPrompt` is essentially: async `MessageTemplate` + tools + format.
+    It can be invoked with a model: `await prompt(model, *args, **kwargs)`.
+    """
+
+    fn: AsyncMessageTemplate[P]
+    """The underlying async prompt function that generates message content."""
+
+    toolkit: AsyncToolkit
+    """The toolkit containing this prompt's async tools."""
+
+    format: type[FormattableT] | Format[FormattableT] | None
+    """The response format for the generated response."""
+
+    @overload
+    async def __call__(
+        self: "AsyncPrompt[P, None]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncResponse: ...
+
+    @overload
+    async def __call__(
+        self: "AsyncPrompt[P, FormattableT]",
+        model: Model,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncResponse[FormattableT]: ...
+
+    async def __call__(
+        self, model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generates a response using the provided model asynchronously."""
+        return await self.call(model, *args, **kwargs)
+
+    @overload
+    async def call(
+        self: "AsyncPrompt[P, None]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncResponse: ...
+
+    @overload
+    async def call(
+        self: "AsyncPrompt[P, FormattableT]",
+        model: Model,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncResponse[FormattableT]: ...
+
+    async def call(
+        self, model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generates a response using the provided model asynchronously."""
+        result = await self.fn(*args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return await model.call_async(
+            messages=messages, tools=self.toolkit, format=self.format
+        )
+
+    @overload
+    async def stream(
+        self: "AsyncPrompt[P, None]", model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncStreamResponse: ...
+
+    @overload
+    async def stream(
+        self: "AsyncPrompt[P, FormattableT]",
+        model: Model,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncStreamResponse[FormattableT]: ...
+
+    async def stream(
+        self, model: Model, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generates a streaming response using the provided model asynchronously."""
+        result = await self.fn(*args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return await model.stream_async(
+            messages=messages, tools=self.toolkit, format=self.format
+        )
+
+
+@dataclass
+class ContextPrompt(Generic[P, DepsT, FormattableT]):
+    """A context-aware prompt that can be called with a model to generate a response.
+
+    Created by decorating a `ContextMessageTemplate` with `llm.prompt`. The decorated
+    function (with first parameter `'ctx'` of type `Context[DepsT]`) becomes callable
+    with a `Model` to generate LLM responses with context dependencies.
+
+    A `ContextPrompt` is essentially: `ContextMessageTemplate` + tools + format.
+    It can be invoked with a model: `prompt(model, ctx, *args, **kwargs)`.
+    """
+
+    fn: ContextMessageTemplate[P, DepsT]
+    """The underlying context-aware prompt function that generates message content."""
+
+    toolkit: ContextToolkit[DepsT]
+    """The toolkit containing this prompt's context-aware tools."""
+
+    format: type[FormattableT] | Format[FormattableT] | None
+    """The response format for the generated response."""
+
+    @overload
+    def __call__(
+        self: "ContextPrompt[P, DepsT, None]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextResponse[DepsT, None]: ...
+
+    @overload
+    def __call__(
+        self: "ContextPrompt[P, DepsT, FormattableT]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextResponse[DepsT, FormattableT]: ...
+
+    def __call__(
+        self,
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generates a response using the provided model."""
+        return self.call(model, ctx, *args, **kwargs)
+
+    @overload
+    def call(
+        self: "ContextPrompt[P, DepsT, None]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextResponse[DepsT, None]: ...
+
+    @overload
+    def call(
+        self: "ContextPrompt[P, DepsT, FormattableT]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextResponse[DepsT, FormattableT]: ...
+
+    def call(
+        self,
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generates a response using the provided model."""
+        result = self.fn(ctx, *args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return model.context_call(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
+
+    @overload
+    def stream(
+        self: "ContextPrompt[P, DepsT, None]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextStreamResponse[DepsT, None]: ...
+
+    @overload
+    def stream(
+        self: "ContextPrompt[P, DepsT, FormattableT]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> ContextStreamResponse[DepsT, FormattableT]: ...
+
+    def stream(
+        self,
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generates a streaming response using the provided model."""
+        result = self.fn(ctx, *args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return model.context_stream(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
+
+
+@dataclass
+class AsyncContextPrompt(Generic[P, DepsT, FormattableT]):
+    """An async context-aware prompt that can be called with a model to generate a response.
+
+    Created by decorating an async `ContextMessageTemplate` with `llm.prompt`. The decorated
+    async function (with first parameter `'ctx'` of type `Context[DepsT]`) becomes callable
+    with a `Model` to generate LLM responses asynchronously with context dependencies.
+
+    An `AsyncContextPrompt` is essentially: async `ContextMessageTemplate` + tools + format.
+    It can be invoked with a model: `await prompt(model, ctx, *args, **kwargs)`.
+    """
+
+    fn: AsyncContextMessageTemplate[P, DepsT]
+    """The underlying async context-aware prompt function that generates message content."""
+
+    toolkit: AsyncContextToolkit[DepsT]
+    """The toolkit containing this prompt's async context-aware tools."""
+
+    format: type[FormattableT] | Format[FormattableT] | None
+    """The response format for the generated response."""
+
+    @overload
+    async def __call__(
+        self: "AsyncContextPrompt[P, DepsT, None]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextResponse[DepsT, None]: ...
+
+    @overload
+    async def __call__(
+        self: "AsyncContextPrompt[P, DepsT, FormattableT]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextResponse[DepsT, FormattableT]: ...
+
+    async def __call__(
+        self,
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generates a response using the provided model asynchronously."""
+        return await self.call(model, ctx, *args, **kwargs)
+
+    @overload
+    async def call(
+        self: "AsyncContextPrompt[P, DepsT, None]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextResponse[DepsT, None]: ...
+
+    @overload
+    async def call(
+        self: "AsyncContextPrompt[P, DepsT, FormattableT]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextResponse[DepsT, FormattableT]: ...
+
+    async def call(
+        self,
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generates a response using the provided model asynchronously."""
+        result = await self.fn(ctx, *args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return await model.context_call_async(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
+
+    @overload
+    async def stream(
+        self: "AsyncContextPrompt[P, DepsT, None]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextStreamResponse[DepsT, None]: ...
+
+    @overload
+    async def stream(
+        self: "AsyncContextPrompt[P, DepsT, FormattableT]",
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncContextStreamResponse[DepsT, FormattableT]: ...
+
+    async def stream(
+        self,
+        model: Model,
+        ctx: Context[DepsT],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generates a streaming response using the provided model asynchronously."""
+        result = await self.fn(ctx, *args, **kwargs)
+        messages = _utils.promote_to_messages(result)
+        return await model.context_stream_async(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
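All four `Prompt` variants follow the same flow: the wrapped template produces content, `_utils.promote_to_messages` normalizes it into a message list, and the `Model` supplied at call time executes it. A minimal sketch of that call pattern, inferred from the signatures above; the decorator spelling, the `Model` constructor, and the model id value are assumptions, not shown in this diff:

```python
from mirascope import llm


@llm.prompt()  # assumed decorator spelling; the decorator lives in prompts/decorator.py
def recommend_book(genre: str) -> str:
    # Assumes a bare string qualifies as UserContent; it is promoted to a
    # message list via _utils.promote_to_messages before the model call.
    return f"Recommend a {genre} book."


model = llm.models.Model("claude-sonnet-4")  # hypothetical constructor and model id

response = recommend_book(model, "fantasy")       # Prompt.__call__ -> Response
stream = recommend_book.stream(model, "fantasy")  # Prompt.stream -> StreamResponse
```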
mirascope/llm/prompts/protocols.py
@@ -1,69 +1,44 @@
 """Types for prompt functions."""
 
-from
+from collections.abc import Sequence
+from typing import Protocol
 
 from ..context import Context, DepsT
 from ..messages import Message, UserContent
 from ..types import P
 
-PromptT = TypeVar(
-    "PromptT", bound="Prompt | AsyncPrompt | ContextPrompt | AsyncContextPrompt"
-)
-"""Type variable for prompt types.
 
-
-"""
+class MessageTemplate(Protocol[P]):
+    """Protocol for a prompt function that returns `UserContent` or `Sequence[Message]`.
 
-
-
-
-
-    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> list[Message]: ...
-
-
-class Promptable(Protocol[P]):
-    """Protocol for a `Promptable` that returns `UserContent` or `list[Message]`.
-
-    May be be converted by the `prompt` decorator into a `Prompt`.
+    A `MessageTemplate` is a raw function that returns prompt content. It can be
+    converted by the `llm.prompt` decorator into a `Prompt` (callable with a `Model`),
+    or by the `llm.call` decorator into a `Call` (`Prompt` + `Model`).
     """
 
     def __call__(
         self, *args: P.args, **kwargs: P.kwargs
-    ) -> UserContent |
-
+    ) -> UserContent | Sequence[Message]: ...
 
-class AsyncPrompt(Protocol[P]):
-    """Protocol for an `AsyncPrompt`, which returns `list[Message]`."""
 
-
+class AsyncMessageTemplate(Protocol[P]):
+    """Protocol for an async prompt function that returns `UserContent` or `Sequence[Message]`.
 
-
-
-    """Protocol for an `AsyncPromptable` that returns `UserContent` or `list[Message]`.
-
-    May be converted by the `prompt` decorator into an `AsyncPrompt`.
+    An async `MessageTemplate` that can be converted by the `llm.prompt` decorator
+    into an `AsyncPrompt`, or by the `llm.call` decorator into an `AsyncCall`.
     """
 
     async def __call__(
         self, *args: P.args, **kwargs: P.kwargs
-    ) -> UserContent |
-
-
-class ContextPrompt(Protocol[P, DepsT]):
-    """Protocol for a `ContextPrompt`, which returns `list[Message]`."""
-
-    def __call__(
-        self,
-        ctx: Context[DepsT],
-        *args: P.args,
-        **kwargs: P.kwargs,
-    ) -> list[Message]: ...
+    ) -> UserContent | Sequence[Message]: ...
 
 
-class
-    """Protocol for a
+class ContextMessageTemplate(Protocol[P, DepsT]):
+    """Protocol for a context-aware prompt function that returns `UserContent` or `Sequence[Message]`.
 
-
+    A `MessageTemplate` with a first parameter named `'ctx'` of type `Context[DepsT]`.
+    Can be converted by the `llm.prompt` decorator into a `ContextPrompt`, or by
+    the `llm.call` decorator into a `ContextCall`.
     """
 
     def __call__(
@@ -71,24 +46,15 @@ class ContextPromptable(Protocol[P, DepsT]):
         ctx: Context[DepsT],
         *args: P.args,
         **kwargs: P.kwargs,
-    ) -> UserContent |
-
-
-class AsyncContextPrompt(Protocol[P, DepsT]):
-    """Protocol for an `AsyncContextPrompt`, which returns `list[Message]`."""
-
-    async def __call__(
-        self,
-        ctx: Context[DepsT],
-        *args: P.args,
-        **kwargs: P.kwargs,
-    ) -> list[Message]: ...
+    ) -> UserContent | Sequence[Message]: ...
 
 
-class
-    """Protocol for an
+class AsyncContextMessageTemplate(Protocol[P, DepsT]):
+    """Protocol for an async context-aware prompt function that returns `UserContent` or `Sequence[Message]`.
 
-
+    An async `MessageTemplate` with a first parameter named `'ctx'` of type `Context[DepsT]`.
+    Can be converted by the `llm.prompt` decorator into an `AsyncContextPrompt`, or by
+    the `llm.call` decorator into an `AsyncContextCall`.
     """
 
     async def __call__(
@@ -96,4 +62,4 @@ class AsyncContextPromptable(Protocol[P, DepsT]):
         ctx: Context[DepsT],
         *args: P.args,
         **kwargs: P.kwargs,
-    ) -> UserContent |
+    ) -> UserContent | Sequence[Message]: ...
mirascope/llm/providers/__init__.py
@@ -0,0 +1,38 @@
+"""Interfaces for LLM providers."""
+
+from .anthropic import (
+    AnthropicModelId,
+    AnthropicProvider,
+)
+from .base import BaseProvider, Params, Provider
+from .google import GoogleModelId, GoogleProvider
+from .load_provider import load, load_provider
+from .mlx import MLXModelId, MLXProvider
+from .model_id import ModelId
+from .openai import (
+    OpenAIModelId,
+    OpenAIProvider,
+)
+from .provider_id import KNOWN_PROVIDER_IDS, ProviderId
+from .provider_registry import get_provider_for_model, register_provider
+
+__all__ = [
+    "KNOWN_PROVIDER_IDS",
+    "AnthropicModelId",
+    "AnthropicProvider",
+    "BaseProvider",
+    "GoogleModelId",
+    "GoogleProvider",
+    "MLXModelId",
+    "MLXProvider",
+    "ModelId",
+    "OpenAIModelId",
+    "OpenAIProvider",
+    "Params",
+    "Provider",
+    "ProviderId",
+    "get_provider_for_model",
+    "load",
+    "load_provider",
+    "register_provider",
+]
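For orientation, a sketch of how this new public surface might be consumed; only the imported names come from the `__all__` above, while the model id value and the single-argument call shape of `get_provider_for_model` are assumptions:

```python
from mirascope.llm.providers import OpenAIModelId, get_provider_for_model

model_id: OpenAIModelId = "gpt-4o-mini"      # hypothetical model id value
provider = get_provider_for_model(model_id)  # assumed call shape; see provider_registry.py
```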
mirascope/llm/{clients → providers}/_missing_import_stubs.py
@@ -5,12 +5,12 @@ from collections.abc import Callable
 
 def create_import_error_stub(
     package_name: str, client_name: str
-) -> Callable:  # pragma: no cover
+) -> Callable[..., None]:  # pragma: no cover
     """Create a stub that raises ImportError when called.
 
     Args:
         package_name: The package/extra name (e.g., "anthropic", "openai", "google")
-        client_name: The client name for the error message (e.g., "
+        client_name: The client name for the error message (e.g., "AnthropicProvider")
 
     Returns:
         A callable that raises `ImportError` with helpful message.
@@ -26,17 +26,19 @@ def create_import_error_stub(
     return _raise_not_installed
 
 
-def
-
+def create_provider_stub(
+    package_name: str, provider_name: str
+) -> type:  # pragma: no cover
+    """Create a stub provider class that raises ImportError when instantiated.
 
     Args:
         package_name: The package/extra name (e.g., "anthropic", "openai", "google")
-
+        provider_name: The client name for the error message (e.g., "AnthropicProvider")
 
     Returns:
         A stub class that raises `ImportError` on instantiation.
     """
-    error_fn = create_import_error_stub(package_name,
+    error_fn = create_import_error_stub(package_name, provider_name)
 
     class _ClientStub:
         """Stub client that raises `ImportError` when instantiated."""
mirascope/llm/providers/anthropic/__init__.py
@@ -0,0 +1,24 @@
+"""Anthropic client implementation."""
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from .model_id import AnthropicModelId
+    from .provider import AnthropicProvider
+else:
+    try:
+        from .model_id import AnthropicModelId
+        from .provider import AnthropicProvider
+    except ImportError:  # pragma: no cover
+        from .._missing_import_stubs import (
+            create_import_error_stub,
+            create_provider_stub,
+        )
+
+        AnthropicProvider = create_provider_stub("anthropic", "AnthropicProvider")
+        AnthropicModelId = str
+
+__all__ = [
+    "AnthropicModelId",
+    "AnthropicProvider",
+]
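When the optional `anthropic` dependency is missing, the `except ImportError` branch above swaps in a stub class from `_missing_import_stubs`, so the import itself succeeds but instantiation fails loudly. A sketch of the resulting behavior (the exact error text is defined in `_missing_import_stubs.py` and not shown in this diff):

```python
from mirascope.llm.providers.anthropic import AnthropicProvider

try:
    AnthropicProvider()  # stub class: raises ImportError on instantiation
except ImportError as err:
    print(err)  # message points at the missing "anthropic" extra
```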
mirascope/llm/{clients → providers}/anthropic/_utils/decode.py
@@ -30,7 +30,7 @@ from ....responses import (
     RawMessageChunk,
     RawStreamEventChunk,
 )
-from ..
+from ..model_id import AnthropicModelId, model_name
 
 ANTHROPIC_FINISH_REASON_MAP = {
     "max_tokens": FinishReason.MAX_TOKENS,
@@ -65,8 +65,9 @@ def decode_response(
     """Convert Anthropic message to mirascope AssistantMessage."""
     assistant_message = AssistantMessage(
         content=[_decode_assistant_content(part) for part in response.content],
-
+        provider_id="anthropic",
         model_id=model_id,
+        provider_model_name=model_name(model_id),
         raw_message={
             "role": response.role,
             "content": [part.model_dump() for part in response.content],
@@ -227,7 +228,7 @@ def decode_stream(
     """Returns a ChunkIterator converted from an Anthropic MessageStreamManager"""
     processor = _AnthropicChunkProcessor()
     with anthropic_stream_manager as stream:
-        for event in stream._raw_stream:
+        for event in stream._raw_stream:  # pyright: ignore[reportPrivateUsage]
             yield from processor.process_event(event)
         yield processor.raw_message_chunk()
 
@@ -238,7 +239,7 @@ async def decode_async_stream(
     """Returns an AsyncChunkIterator converted from an Anthropic MessageStreamManager"""
     processor = _AnthropicChunkProcessor()
     async with anthropic_stream_manager as stream:
-        async for event in stream._raw_stream:
+        async for event in stream._raw_stream:  # pyright: ignore[reportPrivateUsage]
             for item in processor.process_event(event):
                 yield item
         yield processor.raw_message_chunk()