mirascope 2.0.0a1__py3-none-any.whl → 2.0.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +85 -0
- mirascope/api/_generated/client.py +155 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +7 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/reference.md +167 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +21 -0
- mirascope/api/_generated/types/http_api_decode_error.py +31 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +44 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_tag.py +29 -0
- mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +41 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +38 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +24 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
- mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
- mirascope/llm/providers/anthropic/model_id.py +40 -0
- mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +10 -7
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
- mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
- mirascope/llm/providers/google/model_id.py +28 -0
- mirascope/llm/providers/google/provider.py +438 -0
- mirascope/llm/providers/load_provider.py +48 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +107 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +411 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +20 -0
- mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
- mirascope/llm/providers/openai/completions/provider.py +456 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +246 -0
- mirascope/llm/providers/openai/provider.py +386 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
- mirascope/llm/providers/openai/responses/provider.py +470 -0
- mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
- mirascope/llm/providers/provider_id.py +13 -0
- mirascope/llm/providers/provider_registry.py +167 -0
- mirascope/llm/responses/base_response.py +10 -5
- mirascope/llm/responses/base_stream_response.py +10 -5
- mirascope/llm/responses/response.py +24 -13
- mirascope/llm/responses/root_response.py +7 -12
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/decorator.py +10 -10
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +12 -11
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +77 -1
- mirascope-2.0.0a3.dist-info/RECORD +206 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a1.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/calls/decorator.py
CHANGED

@@ -4,29 +4,24 @@ from __future__ import annotations
 
 from collections.abc import Sequence
 from dataclasses import dataclass
-from typing import Generic, Literal, cast, overload
+from typing import Generic, cast, overload
 from typing_extensions import Unpack
 
-from ..clients import (
-    AnthropicModelId,
-    GoogleModelId,
-    ModelId,
-    OpenAICompletionsModelId,
-    OpenAIResponsesModelId,
-    Params,
-    Provider,
-)
 from ..context import DepsT
 from ..formatting import Format, FormattableT
 from ..models import Model
 from ..prompts import (
-    …
+    AsyncContextMessageTemplate,
+    AsyncContextPrompt,
+    AsyncMessageTemplate,
+    AsyncPrompt,
+    ContextMessageTemplate,
+    ContextPrompt,
+    MessageTemplate,
+    Prompt,
+    _utils,
 )
+from ..providers import ModelId, Params
 from ..tools import (
     AsyncContextTool,
     AsyncContextToolkit,

@@ -44,16 +39,32 @@ from .calls import AsyncCall, AsyncContextCall, Call, ContextCall
 
 @dataclass(kw_only=True)
 class CallDecorator(Generic[ToolT, FormattableT]):
-    """…"""
+    """Decorator for converting a `MessageTemplate` into a `Call`.
+
+    Takes a raw prompt function that returns message content and wraps it with tools,
+    format, and a model to create a `Call` that can be invoked directly without needing
+    to pass a model argument.
+
+    The decorator automatically detects whether the function is async or context-aware
+    and creates the appropriate `Call` variant (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`).
+
+    Conceptually: `CallDecorator` = `PromptDecorator` + `Model`
+    Result: `Call` = `MessageTemplate` + tools + format + `Model`
+    """
 
     model: Model
+    """The default model to use with this call. May be overridden."""
+
     tools: Sequence[ToolT] | None
+    """The tools that are included in the prompt, if any."""
+
     format: type[FormattableT] | Format[FormattableT] | None
+    """The structured output format off the prompt, if any."""
 
     @overload
     def __call__(
         self: CallDecorator[AsyncTool | AsyncContextTool[DepsT], FormattableT],
-        fn: …,
+        fn: AsyncContextMessageTemplate[P, DepsT],
     ) -> AsyncContextCall[P, DepsT, FormattableT]:
         """Decorate an async context prompt into an AsyncContextCall."""
         ...

@@ -61,31 +72,31 @@ class CallDecorator(Generic[ToolT, FormattableT]):
     @overload
     def __call__(
         self: CallDecorator[Tool | ContextTool[DepsT], FormattableT],
-        fn: …,
+        fn: ContextMessageTemplate[P, DepsT],
     ) -> ContextCall[P, DepsT, FormattableT]:
         """Decorate a context prompt into a ContextCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[AsyncTool, FormattableT], fn: …
+        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncMessageTemplate[P]
     ) -> AsyncCall[P, FormattableT]:
         """Decorate an async prompt into an AsyncCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[Tool, FormattableT], fn: …
+        self: CallDecorator[Tool, FormattableT], fn: MessageTemplate[P]
     ) -> Call[P, FormattableT]:
         """Decorate a prompt into a Call."""
         ...
 
     def __call__(
         self,
-        fn: …,
+        fn: ContextMessageTemplate[P, DepsT]
+        | AsyncContextMessageTemplate[P, DepsT]
+        | MessageTemplate[P]
+        | AsyncMessageTemplate[P],
     ) -> (
         ContextCall[P, DepsT, FormattableT]
         | AsyncContextCall[P, DepsT, FormattableT]

@@ -93,123 +104,122 @@ class CallDecorator(Generic[ToolT, FormattableT]):
         | AsyncCall[P, FormattableT]
     ):
         """Decorates a prompt into a Call or ContextCall."""
-        is_context = …
-        is_async = …
+        is_context = _utils.is_context_promptable(fn)
+        is_async = _utils.is_async_promptable(fn)
 
         if is_context and is_async:
             tools = cast(
                 Sequence[AsyncTool | AsyncContextTool[DepsT]] | None, self.tools
             )
+            prompt = AsyncContextPrompt(
+                fn=fn,
+                toolkit=AsyncContextToolkit(tools=tools),
+                format=self.format,
+            )
             return AsyncContextCall(
-                fn=fn,
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=AsyncContextToolkit(tools=tools),
             )
         elif is_context:
             tools = cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
+            prompt = ContextPrompt(
+                fn=fn,
+                toolkit=ContextToolkit(tools=tools),
+                format=self.format,
+            )
             return ContextCall(
-                fn=fn,
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=ContextToolkit(tools=tools),
             )
         elif is_async:
             tools = cast(Sequence[AsyncTool] | None, self.tools)
+            prompt = AsyncPrompt(
+                fn=fn, toolkit=AsyncToolkit(tools=tools), format=self.format
+            )
             return AsyncCall(
-                fn=fn,
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=AsyncToolkit(tools=tools),
             )
         else:
            tools = cast(Sequence[Tool] | None, self.tools)
+            prompt = Prompt(fn=fn, toolkit=Toolkit(tools=tools), format=self.format)
            return Call(
-                fn=fn,
+                prompt=prompt,
                default_model=self.model,
-                format=self.format,
-                toolkit=Toolkit(tools=tools),
            )
 
 
 @overload
 def call(
+    model: ModelId,
     *,
-    provider: Literal["anthropic"],
-    model_id: AnthropicModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using Anthropic models."""
-    ...
-
+    """Decorator for converting prompt functions into LLM calls.
 
-@overload
-def call(
-    *,
-    provider: Literal["google"],
-    model_id: GoogleModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using Google models."""
+    This overload accepts a model ID string and allows additional params.
+    """
     ...
 
 
 @overload
 def call(
+    model: Model,
     *,
-    provider: Literal["openai:completions"],
-    model_id: OpenAICompletionsModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using OpenAI models (Completions API)."""
-    ...
-
+    """Decorator for converting prompt functions into LLM calls.
 
-@overload
-def call(
-    *,
-    provider: Literal["openai:responses", "openai"],
-    model_id: OpenAIResponsesModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using OpenAI models (Responses API)."""
-    ...
-
-
-@overload
-def call(
-    *,
-    provider: Provider,
-    model_id: ModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using a generic provider and model."""
+    This overload accepts a Model instance and does not allow additional params.
+    """
     ...
 
 
 def call(
+    model: ModelId | Model,
     *,
-    provider: Provider,
-    model_id: ModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """…
-    …
+    """Decorates a `MessageTemplate` to create a `Call` that can be invoked directly.
+
+    The `llm.call` decorator is the most convenient way to use Mirascope. It transforms
+    a raw prompt function (that returns message content) into a `Call` object that bundles
+    the function with tools, format, and a model. The resulting `Call` can be invoked
+    directly to generate LLM responses without needing to pass a model argument.
+
+    The decorator automatically detects the function type:
+    - If the first parameter is named `'ctx'` with type `llm.Context[T]` (or a subclass thereof),
+      creates a `ContextCall`
+    - If the function is async, creates an `AsyncCall` or `AsyncContextCall`
+    - Otherwise, creates a regular `Call`
+
+    The model specified in the decorator can be overridden at runtime using the
+    `llm.model()` context manager. When overridden, the context model completely
+    replaces the decorated model, including all parameters.
+
+    Conceptual flow:
+    - `MessageTemplate`: raw function returning content
+    - `@llm.prompt`: `MessageTemplate` → `Prompt`
+      Includes tools and format, if applicable. Can be called by providing a `Model`.
+    - `@llm.call`: `MessageTemplate` → `Call`. Includes a model, tools, and format. The
+      model may be created on the fly from a model identifier and optional params, or
+      provided outright.
+
+    Args:
+        model: A model ID string (e.g., "openai/gpt-4") or a `Model` instance
+        tools: Optional `Sequence` of tools to make available to the LLM
+        format: Optional response format class (`BaseModel`) or Format instance
+        **params: Additional call parameters (temperature, max_tokens, etc.)
+            Only available when passing a model ID string
+
+    Returns:
+        A `CallDecorator` that converts prompt functions into `Call` variants
+        (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`)
 
     Example:

@@ -217,15 +227,12 @@ def call(
         ```python
         from mirascope import llm
 
-        @llm.call(
-            …
-        )
-        def answer_question(question: str) -> str:
-            return f"Answer this question: {question}"
+        @llm.call("openai/gpt-4")
+        def recommend_book(genre: str):
+            return f"Please recommend a book in {genre}."
 
-        response: llm.Response = answer_question(…)
-        print(response)
+        response: llm.Response = recommend_book("fantasy")
+        print(response.pretty())
         ```
 
     Example:

@@ -236,20 +243,19 @@ def call(
         from mirascope import llm
 
         @dataclass
-        class …
-            …
-
-        response = answer_question(ctx, "What is the capital of France?")
-        print(response)
+        class User:
+            name: str
+            age: int
+
+        @llm.call("openai/gpt-4")
+        def recommend_book(ctx: llm.Context[User], genre: str):
+            return f"Recommend a {genre} book for {ctx.deps.name}, age {ctx.deps.age}."
+
+        ctx = llm.Context(deps=User(name="Alice", age=15))
+        response = recommend_book(ctx, "fantasy")
+        print(response.pretty())
         ```
     """
-    …
+    if isinstance(model, str):
+        model = Model(model, **params)
     return CallDecorator(model=model, tools=tools, format=format)
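The practical upshot of this file's changes: the per-provider `provider=`/`model_id=` overloads are gone, and `llm.call` now takes a single positional model argument, either a model ID string (with extra params allowed) or a `Model` instance. A minimal usage sketch assembled from the examples in the new docstring above; the commented-out line shows the removed 2.0.0a1 keyword style:

```python
from mirascope import llm

# 2.0.0a1 (removed): @llm.call(provider="google", model_id=..., ...)

# 2.0.0a3: one positional model identifier; **params only with the string form.
@llm.call("openai/gpt-4")
def recommend_book(genre: str):
    return f"Please recommend a book in {genre}."

response: llm.Response = recommend_book("fantasy")
print(response.pretty())
```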
mirascope/llm/content/__init__.py
CHANGED

@@ -2,6 +2,7 @@
 
 from typing import TypeAlias
 
+from ..types import Jsonable
 from .audio import Audio, Base64AudioSource
 from .document import (
     Base64DocumentSource,

@@ -16,11 +17,11 @@ from .tool_call import ToolCall, ToolCallChunk, ToolCallEndChunk, ToolCallStartC
 from .tool_output import ToolOutput
 
 ContentPart: TypeAlias = (
-    Text | Image | Audio | Document | ToolOutput | ToolCall | Thought
+    Text | Image | Audio | Document | ToolOutput[Jsonable] | ToolCall | Thought
 )
 """Content parts that may be included in a Message."""
 
-UserContentPart: TypeAlias = Text | Image | Audio | Document | ToolOutput
+UserContentPart: TypeAlias = Text | Image | Audio | Document | ToolOutput[Jsonable]
 """Content parts that can be included in a UserMessage."""
 
 AssistantContentPart: TypeAlias = Text | ToolCall | Thought
mirascope/llm/context/_utils.py
CHANGED

@@ -1,11 +1,12 @@
 import inspect
+import typing
 from collections.abc import Callable
-from typing import get_origin
+from typing import Any, get_origin
 
 from .context import Context
 
 
-def first_param_is_context(fn: Callable) -> bool:
+def first_param_is_context(fn: Callable[..., Any]) -> bool:
     """Returns whether the first argument to a function is `ctx: Context`.
 
     Also returns true if the first argument is a subclass of `Context`.

@@ -21,8 +22,20 @@ def first_param_is_context(fn: Callable) -> bool:
     else:
         first_param = params[0]
 
-    …
+    if first_param.name != "ctx":
+        return False
+
+    try:
+        hints = typing.get_type_hints(fn)
+        annotation = hints.get(first_param.name)
+    except (NameError, AttributeError, TypeError):
+        annotation = first_param.annotation
+
+    if annotation is None or annotation is inspect.Parameter.empty:
+        return False
+
+    type_is_context = get_origin(annotation) is Context
+    subclass_of_context = isinstance(annotation, type) and issubclass(
+        annotation, Context
     )
-    return …
+    return type_is_context or subclass_of_context
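The rewritten helper now resolves string annotations via `typing.get_type_hints` and requires both the `ctx` parameter name and a `Context` annotation. A small sketch of that rule; importing from the private `_utils` module is for illustration only:

```python
from mirascope import llm
from mirascope.llm.context._utils import first_param_is_context


def plain(genre: str) -> str:
    return genre


def with_ctx(ctx: llm.Context[str], genre: str) -> str:
    return genre


# The first parameter must be named "ctx" *and* be annotated with
# llm.Context[...] (or a Context subclass) for detection to succeed.
assert first_param_is_context(plain) is False
assert first_param_is_context(with_ctx) is True
```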
mirascope/llm/exceptions.py
CHANGED

@@ -1,25 +1,25 @@
-"""Mirascope exception hierarchy for unified error handling across providers."""
+"""Mirascope llm exception hierarchy for unified error handling across providers."""
 
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from .clients import ModelId, Provider
     from .formatting import FormattingMode
+    from .providers import ModelId, ProviderId
 
 
-class MirascopeError(Exception):
-    """Base exception for all Mirascope errors."""
+class MirascopeLLMError(Exception):
+    """Base exception for all Mirascope LLM errors."""
 
     original_exception: Exception | None
 
 
-class APIError(MirascopeError):
+class APIError(MirascopeLLMError):
     """Base class for API-related errors."""
 
     status_code: int | None
 
 
-class ConnectionError(MirascopeError):
+class ConnectionError(MirascopeLLMError):
     """Raised when unable to connect to the API (network issues, timeouts)."""
 

@@ -39,33 +39,33 @@ class NotFoundError(APIError):
     """Raised when requested resource is not found (404)."""
 
 
-class ToolNotFoundError(MirascopeError):
+class ToolNotFoundError(MirascopeLLMError):
     """Raised if a tool_call cannot be converted to any corresponding tool."""
 
 
-class FeatureNotSupportedError(MirascopeError):
+class FeatureNotSupportedError(MirascopeLLMError):
     """Raised if a Mirascope feature is unsupported by chosen provider.
 
     If compatibility is model-specific, then `model_id` should be specified.
     If the feature is not supported by the provider at all, then it may be `None`."""
 
-    provider: "Provider"
+    provider_id: "ProviderId"
     model_id: "ModelId | None"
     feature: str
 
     def __init__(
         self,
         feature: str,
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId | None" = None,
         message: str | None = None,
     ) -> None:
         if message is None:
             model_msg = f" for model '{model_id}'" if model_id is not None else ""
-            message = f"Feature '{feature}' is not supported by provider '{provider}'{model_msg}"
+            message = f"Feature '{feature}' is not supported by provider '{provider_id}'{model_msg}"
         super().__init__(message)
         self.feature = feature
-        self.provider = provider
+        self.provider_id = provider_id
         self.model_id = model_id
 

@@ -77,16 +77,16 @@ class FormattingModeNotSupportedError(FeatureNotSupportedError):
     def __init__(
         self,
         formatting_mode: "FormattingMode",
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId | None" = None,
         message: str | None = None,
     ) -> None:
         if message is None:
             model_msg = f" for model '{model_id}'" if model_id is not None else ""
-            message = f"Formatting mode '{formatting_mode}' is not supported by provider '{provider}'{model_msg}"
+            message = f"Formatting mode '{formatting_mode}' is not supported by provider '{provider_id}'{model_msg}"
         super().__init__(
             feature=f"formatting_mode:{formatting_mode}",
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
             message=message,
         )

@@ -101,5 +101,19 @@ class ServerError(APIError):
     """Raised for server-side errors (500+)."""
 
 
-class TimeoutError(MirascopeError):
+class TimeoutError(MirascopeLLMError):
     """Raised when requests timeout or deadline exceeded."""
+
+
+class NoRegisteredProviderError(MirascopeLLMError):
+    """Raised when no provider is registered for a given model_id."""
+
+    model_id: str
+
+    def __init__(self, model_id: str) -> None:
+        message = (
+            f"No provider registered for model '{model_id}'. "
+            f"Use llm.register_provider() to register a provider for this model."
+        )
+        super().__init__(message)
+        self.model_id = model_id
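The new `NoRegisteredProviderError` carries the offending model id and slots under the renamed base class. A quick sketch grounded in the constructor above (the model id here is a made-up example):

```python
from mirascope.llm.exceptions import MirascopeLLMError, NoRegisteredProviderError

err = NoRegisteredProviderError("acme/unknown-model")

assert isinstance(err, MirascopeLLMError)  # part of the renamed hierarchy
assert err.model_id == "acme/unknown-model"
print(err)
# No provider registered for model 'acme/unknown-model'. Use llm.register_provider()
# to register a provider for this model.
```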
mirascope/llm/formatting/_utils.py
CHANGED

@@ -2,8 +2,9 @@
 
 import inspect
 import json
+from typing import Any, cast
 
-from ..tools import FORMAT_TOOL_NAME, ToolParameterSchema, ToolSchema
+from ..tools import FORMAT_TOOL_NAME, ToolFn, ToolParameterSchema, ToolSchema
 from .types import Format, FormattableT, FormattingMode
 
 TOOL_MODE_INSTRUCTIONS = f"""Always respond to the user's query using the {FORMAT_TOOL_NAME} tool for structured output."""

@@ -27,7 +28,9 @@ def default_formatting_instructions(
     return inspect.cleandoc(instructions)
 
 
-def create_tool_schema(format: Format[FormattableT]) -> ToolSchema:
+def create_tool_schema(
+    format: Format[FormattableT],
+) -> ToolSchema[ToolFn[..., None]]:
     """Convert a `Format` to a `ToolSchema` for format parsing.
 
     Args:

@@ -37,13 +40,14 @@ def create_tool_schema(format: Format[FormattableT]) -> ToolSchema:
         `ToolSchema` for the format tool
     """
 
-    schema_dict = format.schema.copy()
+    schema_dict: dict[str, Any] = format.schema.copy()
     schema_dict["type"] = "object"
 
     properties = schema_dict.get("properties")
     if not properties or not isinstance(properties, dict):
         properties = {}  # pragma: no cover
-    required: list[str] = list(properties.keys())
+    properties = cast(dict[str, Any], properties)
+    required: list[str] = list(properties.keys())
 
     description = (
         f"Use this tool to extract data in {format.name} format for a final response."

@@ -64,7 +68,7 @@ def create_tool_schema(format: Format[FormattableT]) -> ToolSchema:
         "Format tool function should not be called."
     )  # pragma: no cover
 
-    tool_schema = ToolSchema.__new__(ToolSchema)
+    tool_schema = cast(ToolSchema[ToolFn[..., None]], ToolSchema.__new__(ToolSchema))
     tool_schema.fn = _unused_format_fn
     tool_schema.name = FORMAT_TOOL_NAME
     tool_schema.description = description
mirascope/llm/messages/message.py
CHANGED

@@ -10,7 +10,7 @@ from ..content import AssistantContentPart, Text, UserContentPart
 from ..types import Jsonable
 
 if TYPE_CHECKING:
-    from ..clients import ModelId, Provider
+    from ..providers import ModelId, ProviderId
 
 
 @dataclass(kw_only=True)

@@ -51,12 +51,15 @@ class AssistantMessage:
     name: str | None = None
     """A name identifying the creator of this message."""
 
-    provider: Provider | None
+    provider_id: ProviderId | None
     """The LLM provider that generated this assistant message, if available."""
 
     model_id: ModelId | None
     """The model identifier of the LLM that generated this assistant message, if available."""
 
+    provider_model_name: str | None
+    """The provider-specific model identifier (e.g. "gpt-5:responses"), if available."""
+
     raw_message: Jsonable | None
     """The provider-specific raw representation of this assistant message, if available.

@@ -149,8 +152,9 @@ def user(
 def assistant(
     content: AssistantContent,
     *,
-    provider: Provider | None,
     model_id: ModelId | None,
+    provider_id: ProviderId | None,
+    provider_model_name: str | None = None,
     raw_message: Jsonable | None = None,
     name: str | None = None,
 ) -> AssistantMessage:

@@ -159,8 +163,10 @@ def assistant(
     Args:
         content: The content of the message, which can be `str` or any `AssistantContent`,
             or a sequence of assistant content pieces.
-        provider: Optional identifier of the provider that produced this message.
         model_id: Optional id of the model that produced this message.
+        provider_id: Optional identifier of the provider that produced this message.
+        provider_model_name: Optional provider-specific model name. May include
+            provider-specific additional info (like api mode in "gpt-5:responses").
         raw_message: Optional Jsonable object that contains the provider-specific
             "raw" data representation of the content for this assistant message.
         name: Optional name to identify a specific assistant in multi-party conversations.

@@ -168,6 +174,7 @@ def assistant(
     Returns:
         An `AssistantMessage`.
     """
+
     if isinstance(content, str) or not isinstance(content, Sequence):
         content = [content]
     promoted_content = [

@@ -175,8 +182,9 @@ def assistant(
     ]
     return AssistantMessage(
         content=promoted_content,
-        provider=provider,
+        provider_id=provider_id,
         model_id=model_id,
+        provider_model_name=provider_model_name,
         raw_message=raw_message,
         name=name,
     )