mirascope 2.0.0a1__py3-none-any.whl → 2.0.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +85 -0
- mirascope/api/_generated/client.py +155 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +7 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/reference.md +167 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +21 -0
- mirascope/api/_generated/types/http_api_decode_error.py +31 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +44 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_tag.py +29 -0
- mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +41 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +38 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +24 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
- mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
- mirascope/llm/providers/anthropic/model_id.py +40 -0
- mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +10 -7
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
- mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
- mirascope/llm/providers/google/model_id.py +28 -0
- mirascope/llm/providers/google/provider.py +438 -0
- mirascope/llm/providers/load_provider.py +48 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +107 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +411 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +20 -0
- mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
- mirascope/llm/providers/openai/completions/provider.py +456 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +246 -0
- mirascope/llm/providers/openai/provider.py +386 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
- mirascope/llm/providers/openai/responses/provider.py +470 -0
- mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
- mirascope/llm/providers/provider_id.py +13 -0
- mirascope/llm/providers/provider_registry.py +167 -0
- mirascope/llm/responses/base_response.py +10 -5
- mirascope/llm/responses/base_stream_response.py +10 -5
- mirascope/llm/responses/response.py +24 -13
- mirascope/llm/responses/root_response.py +7 -12
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/decorator.py +10 -10
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +12 -11
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +77 -1
- mirascope-2.0.0a3.dist-info/RECORD +206 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a1.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
- {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/models/__init__.py
CHANGED
@@ -6,11 +6,11 @@ the model at runtime, and `llm.use_model()` retrieves the model from context or
 creates a default one.
 """
 
-from .models import Model,
+from .models import Model, model, model_from_context, use_model
 
 __all__ = [
     "Model",
-    "get_model_from_context",
     "model",
+    "model_from_context",
    "use_model",
 ]
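The only user-visible change in this module is the rename of the context accessor. A minimal sketch of what that looks like at an import site, using only the names in the new `__all__` above (the `"openai/gpt-5-mini"` fallback id is illustrative, not something this diff prescribes):

```python
from mirascope.llm.models import model, model_from_context

# 2.0.0a1 exported `get_model_from_context`; 2.0.0a3 renames it to `model_from_context`.
current = model_from_context()  # the Model set via `with llm.model(...)`, or None
if current is None:
    # No model in context: fall back to an explicit default (illustrative id).
    current = model("openai/gpt-5-mini")
print(current.model_id)
```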
mirascope/llm/models/models.py
CHANGED
@@ -2,16 +2,22 @@
 
 from __future__ import annotations
 
-from collections.abc import
-from
-from
-from typing import
+from collections.abc import Sequence
+from contextvars import ContextVar, Token
+from types import TracebackType
+from typing import overload
 from typing_extensions import Unpack
 
-from ..clients import PROVIDERS, get_client
 from ..context import Context, DepsT
 from ..formatting import Format, FormattableT
 from ..messages import Message, UserContent
+from ..providers import (
+    ModelId,
+    Params,
+    Provider,
+    ProviderId,
+    get_provider_for_model,
+)
 from ..responses import (
     AsyncContextResponse,
     AsyncContextStreamResponse,
@@ -33,18 +39,10 @@ from ..tools import (
     Toolkit,
 )
 
-if TYPE_CHECKING:
-    from ..clients import (
-        ModelId,
-        Params,
-        Provider,
-    )
-
-
 MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
 
 
-def
+def model_from_context() -> Model | None:
     """Get the LLM currently set via context, if any."""
     return MODEL_CONTEXT.get()
 
@@ -68,7 +66,7 @@ class Model:
 
     def recommend_book(genre: str) -> llm.Response:
         # Uses context model if available, otherwise creates default
-        model = llm.use_model(
+        model = llm.use_model("openai/gpt-5-mini")
         message = llm.messages.user(f"Please recommend a book in {genre}.")
         return model.call(messages=[message])
 
@@ -76,7 +74,7 @@ class Model:
     response = recommend_book("fantasy")
 
     # Override with different model
-    with llm.model(provider="anthropic", model_id="claude-sonnet-4-
+    with llm.model(provider="anthropic", model_id="anthropic/claude-sonnet-4-5"):
         response = recommend_book("fantasy")  # Uses Claude
     ```
 
@@ -87,33 +85,73 @@ class Model:
 
     def recommend_book(genre: str) -> llm.Response:
         # Hardcoded model, cannot be overridden by context
-        model = llm.Model(
+        model = llm.Model("openai/gpt-5-mini")
         message = llm.messages.user(f"Please recommend a book in {genre}.")
         return model.call(messages=[message])
     ```
     """
 
-    provider: Provider
-    """The provider being used (e.g. `openai`)."""
-
     model_id: ModelId
-    """The model being used (e.g. `gpt-4o-mini`)."""
+    """The model being used (e.g. `"openai/gpt-4o-mini"`)."""
 
     params: Params
     """The default parameters for the model (temperature, max_tokens, etc.)."""
 
     def __init__(
         self,
-        provider: Provider,
         model_id: ModelId,
         **params: Unpack[Params],
     ) -> None:
-        """Initialize the Model with
-        if
-            raise ValueError(
-
+        """Initialize the Model with model_id and optional params."""
+        if "/" not in model_id:
+            raise ValueError(
+                "Invalid model_id format. Expected format: 'provider/model-name' "
+                f"(e.g., 'openai/gpt-4'). Got: '{model_id}'"
+            )
         self.model_id = model_id
         self.params = params
+        self._token_stack: list[Token[Model | None]] = []
+
+    @property
+    def provider(self) -> Provider:
+        """The provider being used (e.g. an `OpenAIProvider`).
+
+        This property dynamically looks up the provider from the registry based on
+        the current model_id. This allows provider overrides via `llm.register_provider()`
+        to take effect even after the model instance is created.
+
+        Raises:
+            NoRegisteredProviderError: If no provider is available for the model_id
+        """
+        return get_provider_for_model(self.model_id)
+
+    @property
+    def provider_id(self) -> ProviderId:
+        """The string id of the provider being used (e.g. `"openai"`).
+
+        This property returns the `id` field of the dynamically resolved provider.
+
+        Raises:
+            NoRegisteredProviderError: If no provider is available for the model_id
+        """
+        return self.provider.id
+
+    def __enter__(self) -> Model:
+        """Enter the context manager, setting this model in context."""
+        token = MODEL_CONTEXT.set(self)
+        self._token_stack.append(token)
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        """Exit the context manager, resetting the model context."""
+        if self._token_stack:
+            token = self._token_stack.pop()
+            MODEL_CONTEXT.reset(token)
 
     @overload
     def call(
@@ -165,7 +203,7 @@ class Model:
         Returns:
             An `llm.Response` object containing the LLM-generated content.
         """
-        return
+        return self.provider.call(
             model_id=self.model_id,
             messages=messages,
             tools=tools,
@@ -223,7 +261,7 @@ class Model:
         Returns:
             An `llm.AsyncResponse` object containing the LLM-generated content.
         """
-        return await
+        return await self.provider.call_async(
             model_id=self.model_id,
             messages=messages,
             tools=tools,
@@ -281,7 +319,7 @@ class Model:
         Returns:
             An `llm.StreamResponse` object for iterating over the LLM-generated content.
         """
-        return
+        return self.provider.stream(
             model_id=self.model_id,
             messages=messages,
             tools=tools,
@@ -293,7 +331,7 @@ class Model:
     async def stream_async(
         self,
         *,
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: None = None,
     ) -> AsyncStreamResponse:
@@ -304,7 +342,7 @@ class Model:
     async def stream_async(
         self,
         *,
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: type[FormattableT] | Format[FormattableT],
     ) -> AsyncStreamResponse[FormattableT]:
@@ -315,7 +353,7 @@ class Model:
    async def stream_async(
         self,
         *,
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: type[FormattableT] | Format[FormattableT] | None,
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
@@ -325,7 +363,7 @@ class Model:
    async def stream_async(
         self,
         *,
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: type[FormattableT] | Format[FormattableT] | None = None,
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
@@ -339,7 +377,7 @@ class Model:
         Returns:
             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
-        return await
+        return await self.provider.stream_async(
             model_id=self.model_id,
             messages=messages,
             tools=tools,
@@ -410,7 +448,7 @@ class Model:
         Returns:
             An `llm.ContextResponse` object containing the LLM-generated content.
         """
-        return
+        return self.provider.context_call(
             ctx=ctx,
             model_id=self.model_id,
             messages=messages,
@@ -482,7 +520,7 @@ class Model:
         Returns:
             An `llm.AsyncContextResponse` object containing the LLM-generated content.
         """
-        return await
+        return await self.provider.context_call_async(
             ctx=ctx,
             model_id=self.model_id,
             messages=messages,
@@ -558,7 +596,7 @@ class Model:
         Returns:
             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
         """
-        return
+        return self.provider.context_stream(
             ctx=ctx,
             model_id=self.model_id,
             messages=messages,
@@ -572,7 +610,7 @@ class Model:
         self,
         *,
         ctx: Context[DepsT],
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -586,7 +624,7 @@ class Model:
         self,
         *,
         ctx: Context[DepsT],
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -600,7 +638,7 @@ class Model:
         self,
         *,
         ctx: Context[DepsT],
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -616,7 +654,7 @@ class Model:
         self,
         *,
         ctx: Context[DepsT],
-        messages:
+        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -636,7 +674,7 @@ class Model:
         Returns:
             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
-        return await
+        return await self.provider.context_stream_async(
             ctx=ctx,
             model_id=self.model_id,
             messages=messages,
@@ -696,7 +734,7 @@ class Model:
         Returns:
             A new `llm.Response` object containing the extended conversation.
         """
-        return
+        return self.provider.resume(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -754,7 +792,7 @@ class Model:
         Returns:
             A new `llm.AsyncResponse` object containing the extended conversation.
         """
-        return await
+        return await self.provider.resume_async(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -817,7 +855,7 @@ class Model:
         Returns:
             A new `llm.ContextResponse` object containing the extended conversation.
         """
-        return
+        return self.provider.context_resume(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -883,7 +921,7 @@ class Model:
         Returns:
             A new `llm.AsyncContextResponse` object containing the extended conversation.
         """
-        return await
+        return await self.provider.context_resume_async(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -942,7 +980,7 @@ class Model:
         Returns:
             A new `llm.StreamResponse` object for streaming the extended conversation.
         """
-        return
+        return self.provider.resume_stream(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -1000,7 +1038,7 @@ class Model:
         Returns:
             A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
         """
-        return await
+        return await self.provider.resume_stream_async(
             model_id=self.model_id,
             response=response,
             content=content,
@@ -1069,7 +1107,7 @@ class Model:
         Returns:
             A new `llm.ContextStreamResponse` object for streaming the extended conversation.
         """
-        return
+        return self.provider.context_resume_stream(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -1141,7 +1179,7 @@ class Model:
         Returns:
             A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
         """
-        return await
+        return await self.provider.context_resume_stream_async(
             ctx=ctx,
             model_id=self.model_id,
             response=response,
@@ -1150,71 +1188,139 @@ class Model:
     )
 
 
-@contextmanager
 def model(
-    *,
-    provider: Provider,
     model_id: ModelId,
     **params: Unpack[Params],
-) ->
-    """
+) -> Model:
+    """Helper for creating a `Model` instance (which may be used as a context manager).
+
+    This is just an alias for the `Model` constructor, added for convenience.
+
+    This function returns a `Model` instance that implements the context manager protocol.
+    When used with a `with` statement, the model will be set in context and used by both
+    `llm.use_model()` and `llm.call()` within that context. This allows you to override
+    the default model at runtime without modifying function definitions.
+
+    The returned `Model` instance can also be stored and reused:
 
-
-
+    ```python
+    m = llm.model("openai/gpt-4o")
+    # Use directly
+    response = m.call(messages=[...])
+    # Or use as context manager
+    with m:
+        response = recommend_book("fantasy")
+    ```
+
+    When a model is set in context, it completely overrides any model ID or parameters
+    specified in `llm.use_model()` or `llm.call()`. The context model's parameters take
+    precedence, and any unset parameters use default values.
 
     Args:
-
-        model_id: The specific model identifier for the chosen provider.
+        model_id: A model ID string (e.g., "openai/gpt-4").
         **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
 
+    Returns:
+        A Model instance that can be used as a context manager.
+
     Raises:
         ValueError: If the specified provider is not supported.
 
     Example:
+        With `llm.use_model()`
 
        ```python
        import mirascope.llm as llm
 
        def recommend_book(genre: str) -> llm.Response:
-            model = llm.use_model(
+            model = llm.use_model("openai/gpt-5-mini")
            message = llm.messages.user(f"Please recommend a book in {genre}.")
            return model.call(messages=[message])
 
        # Override the default model at runtime
-        with llm.model(
+        with llm.model("anthropic/claude-sonnet-4-5"):
+            response = recommend_book("fantasy")  # Uses Claude instead of GPT
+        ```
+
+    Example:
+        With `llm.call()`
+
+        ```python
+        import mirascope.llm as llm
+
+        @llm.call("openai/gpt-5-mini")
+        def recommend_book(genre: str):
+            return f"Please recommend a {genre} book."
+
+        # Override the decorated model at runtime
+        with llm.model("anthropic/claude-sonnet-4-0"):
            response = recommend_book("fantasy")  # Uses Claude instead of GPT
        ```
+
+    Example:
+        Storing and reusing Model instances
+
+        ```python
+        import mirascope.llm as llm
+
+        # Create and store a model
+        m = llm.model("openai/gpt-4o")
+
+        # Use it directly
+        response = m.call(messages=[llm.messages.user("Hello!")])
+
+        # Or use it as a context manager
+        with m:
+            response = recommend_book("fantasy")
+        ```
    """
-
-    try:
-        yield
-    finally:
-        MODEL_CONTEXT.reset(token)
+    return Model(model_id, **params)
 
 
+@overload
 def use_model(
-
-
-
+    model: ModelId,
+    **params: Unpack[Params],
+) -> Model:
+    """Get the model from context if available, otherwise create a new `Model`.
+
+    This overload accepts a model ID string and allows additional params.
+    """
+    ...
+
+
+@overload
+def use_model(
+    model: Model,
+) -> Model:
+    """Get the model from context if available, otherwise use the provided `Model`.
+
+    This overload accepts a `Model` instance and does not allow additional params.
+    """
+    ...
+
+
+def use_model(
+    model: Model | ModelId,
     **params: Unpack[Params],
 ) -> Model:
-    """Get the model from context if available, otherwise create a new Model
+    """Get the model from context if available, otherwise create a new `Model`.
 
     This function checks if a model has been set in the context (via `llm.model()`
-    context manager). If a model is found in the context, it returns that model
-
-
+    context manager). If a model is found in the context, it returns that model,
+    ignoring any model ID or parameters passed to this function. Otherwise, it creates
+    and returns a new `llm.Model` instance with the provided arguments.
 
     This allows you to write functions that work with a default model but can be
     overridden at runtime using the `llm.model()` context manager.
 
     Args:
-
-        model_id: The specific model identifier for the chosen provider.
+        model: A model ID string (e.g., "openai/gpt-4") or a Model instance
         **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
+            Only available when passing a model ID string
 
     Returns:
-        An `llm.Model` instance from context or a new instance with the specified settings.
+        An `llm.Model` instance from context (if set) or a new instance with the specified settings.
 
     Raises:
         ValueError: If the specified provider is not supported.
@@ -1225,19 +1331,21 @@ def use_model(
        import mirascope.llm as llm
 
        def recommend_book(genre: str) -> llm.Response:
-            model = llm.use_model(
+            model = llm.use_model("openai/gpt-5-mini")
            message = llm.messages.user(f"Please recommend a book in {genre}.")
            return model.call(messages=[message])
 
-        # Uses the default model (gpt-
+        # Uses the default model (gpt-5-mini)
        response = recommend_book("fantasy")
 
        # Override with a different model
-        with llm.model(provider="anthropic", model_id="claude-sonnet-4-
+        with llm.model(provider="anthropic", model_id="anthropic/claude-sonnet-4-5"):
            response = recommend_book("fantasy")  # Uses Claude instead
        ```
    """
-    context_model =
+    context_model = model_from_context()
    if context_model is not None:
        return context_model
-
+    if isinstance(model, str):
+        return Model(model, **params)
+    return model
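Taken together, these hunks replace the separate `provider=` / `model_id=` pair with a single `"provider/model-name"` id, resolve the provider lazily through the new registry, and make the `Model` instance itself the context manager (the old `@contextmanager`-based `model()` becomes a plain constructor alias). A minimal usage sketch based on the docstrings above; the model ids and `temperature` value are illustrative, and actually running it requires credentials for the named providers:

```python
import mirascope.llm as llm

def recommend_book(genre: str) -> llm.Response:
    # Returns the context model if one is active, otherwise builds this default.
    model = llm.use_model("openai/gpt-5-mini")
    return model.call(messages=[llm.messages.user(f"Recommend a {genre} book.")])

# A Model is now constructed from one "provider/model-name" id; its provider is
# looked up from the registry only when `.provider` is accessed, so later
# `llm.register_provider()` overrides still take effect.
m = llm.model("anthropic/claude-sonnet-4-5", temperature=0.2)

# The Model instance implements __enter__/__exit__, so it can be stored, reused,
# and used directly as the override context.
with m:
    response = recommend_book("fantasy")  # resolved to the Anthropic provider at call time
```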
mirascope/llm/prompts/__init__.py
CHANGED
@@ -5,29 +5,30 @@ python functions.
 """
 
 from . import _utils
-from .decorator import prompt
-from .
+from .decorator import PromptDecorator, prompt
+from .prompts import (
     AsyncContextPrompt,
-    AsyncContextPromptable,
     AsyncPrompt,
-    AsyncPromptable,
     ContextPrompt,
-    ContextPromptable,
     Prompt,
-
-
+)
+from .protocols import (
+    AsyncContextMessageTemplate,
+    AsyncMessageTemplate,
+    ContextMessageTemplate,
+    MessageTemplate,
 )
 
 __all__ = [
+    "AsyncContextMessageTemplate",
     "AsyncContextPrompt",
-    "
+    "AsyncMessageTemplate",
     "AsyncPrompt",
-    "
+    "ContextMessageTemplate",
     "ContextPrompt",
-    "
+    "MessageTemplate",
     "Prompt",
-    "
-    "Promptable",
+    "PromptDecorator",
     "_utils",
     "prompt",
 ]
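For downstream imports, the effect of this hunk is the retirement of the `*Promptable` names in favor of the `*MessageTemplate` protocols, plus the newly exported `PromptDecorator`. A sketch of the import-site change; the commented v2.0.0a1 line is reconstructed from the deleted `.protocols` import lines above (the removed `__all__` entries are truncated in this rendering), so it is indicative only:

```python
# v2.0.0a1 (removed; reconstructed from the deleted import lines above)
# from mirascope.llm.prompts import AsyncContextPromptable, AsyncPromptable, ContextPromptable, Promptable

# v2.0.0a3
from mirascope.llm.prompts import (
    AsyncContextMessageTemplate,
    AsyncMessageTemplate,
    ContextMessageTemplate,
    MessageTemplate,
    PromptDecorator,
    prompt,
)
```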
mirascope/llm/prompts/_utils.py
CHANGED
@@ -1,4 +1,5 @@
 import inspect
+from collections.abc import Sequence
 from typing_extensions import TypeIs
 
 from ..context import DepsT, _utils as _context_utils
@@ -12,24 +13,26 @@ from ..messages import (
 )
 from ..types import P
 from .protocols import (
-
-
-
-
+    AsyncContextMessageTemplate,
+    AsyncMessageTemplate,
+    ContextMessageTemplate,
+    MessageTemplate,
 )
 
 
 def is_messages(
-    messages_or_content:
-) -> TypeIs[
-    if
-
-
-
-
-
-
-
+    messages_or_content: Sequence[Message] | UserContent,
+) -> TypeIs[Sequence[Message]]:
+    if isinstance(messages_or_content, list):
+        if not messages_or_content:
+            raise ValueError("Empty array may not be used as message content")
+        return isinstance(
+            messages_or_content[0], SystemMessage | UserMessage | AssistantMessage
+        )
+    return False
+
+
+def promote_to_messages(result: Sequence[Message] | UserContent) -> Sequence[Message]:
     """Promote a prompt result to a list of messages.
 
     If the result is already a list of Messages, returns it as-is.
@@ -41,20 +44,20 @@ def promote_to_messages(result: list[Message] | UserContent) -> list[Message]:
 
 
 def is_context_promptable(
-    fn:
-
-
-
-) -> TypeIs[
+    fn: ContextMessageTemplate[P, DepsT]
+    | AsyncContextMessageTemplate[P, DepsT]
+    | MessageTemplate[P]
+    | AsyncMessageTemplate[P],
+) -> TypeIs[ContextMessageTemplate[P, DepsT] | AsyncContextMessageTemplate[P, DepsT]]:
     """Type guard to check if a function is a context promptable function."""
     return _context_utils.first_param_is_context(fn)
 
 
 def is_async_promptable(
-    fn:
-
-
-
-) -> TypeIs[
+    fn: ContextMessageTemplate[P, DepsT]
+    | AsyncContextMessageTemplate[P, DepsT]
+    | MessageTemplate[P]
+    | AsyncMessageTemplate[P],
+) -> TypeIs[AsyncMessageTemplate[P] | AsyncContextMessageTemplate[P, DepsT]]:
     """Type guard to check if a function is an async promptable function."""
     return inspect.iscoroutinefunction(fn)