mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl
This diff shows the changes between two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +141 -0
- mirascope/api/_generated/client.py +163 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +17 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/errors/conflict_error.py +15 -0
- mirascope/api/_generated/errors/forbidden_error.py +15 -0
- mirascope/api/_generated/errors/internal_server_error.py +15 -0
- mirascope/api/_generated/errors/not_found_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/organizations/__init__.py +25 -0
- mirascope/api/_generated/organizations/client.py +380 -0
- mirascope/api/_generated/organizations/raw_client.py +876 -0
- mirascope/api/_generated/organizations/types/__init__.py +23 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +17 -0
- mirascope/api/_generated/projects/client.py +458 -0
- mirascope/api/_generated/projects/raw_client.py +1016 -0
- mirascope/api/_generated/projects/types/__init__.py +15 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
- mirascope/api/_generated/reference.md +753 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +37 -0
- mirascope/api/_generated/types/already_exists_error.py +24 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/database_error.py +24 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/http_api_decode_error.py +29 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +40 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/not_found_error_body.py +24 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/permission_denied_error.py +24 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_key.py +27 -0
- mirascope/api/_generated/types/property_key_key_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +45 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +44 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +29 -0
- mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
- mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
- mirascope/llm/providers/anthropic/beta_provider.py +322 -0
- mirascope/llm/providers/anthropic/model_id.py +23 -0
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +416 -0
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +25 -8
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
- mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
- mirascope/llm/providers/google/model_id.py +22 -0
- mirascope/llm/providers/google/model_info.py +62 -0
- mirascope/llm/providers/google/provider.py +442 -0
- mirascope/llm/providers/load_provider.py +54 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +129 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +415 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/ollama/__init__.py +19 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +25 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
- mirascope/llm/providers/openai/completions/base_provider.py +513 -0
- mirascope/llm/providers/openai/completions/provider.py +22 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +303 -0
- mirascope/llm/providers/openai/provider.py +398 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
- mirascope/llm/providers/openai/responses/provider.py +469 -0
- mirascope/llm/providers/provider_id.py +23 -0
- mirascope/llm/providers/provider_registry.py +169 -0
- mirascope/llm/providers/together/__init__.py +19 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +3 -0
- mirascope/llm/responses/base_response.py +14 -5
- mirascope/llm/responses/base_stream_response.py +35 -6
- mirascope/llm/responses/finish_reason.py +1 -0
- mirascope/llm/responses/response.py +33 -13
- mirascope/llm/responses/root_response.py +12 -13
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/responses/usage.py +95 -0
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +10 -9
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
- mirascope-2.0.0a4.dist-info/RECORD +247 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
- mirascope/llm/clients/anthropic/clients.py +0 -819
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/openai/shared/__init__.py +0 -7
- mirascope/llm/clients/openai/shared/_utils.py +0 -55
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a2.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
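The dominant change in this list is structural: the `mirascope/llm/clients/` package is replaced by `mirascope/llm/providers/` (with per-backend `provider.py` modules plus new MLX, Ollama, and Together backends), and a new `mirascope/ops/` package adds tracing and versioning. A minimal import-level sketch of what the rename implies; beyond the two names visible in the decorator diff below, the public surface of `providers` is an assumption:

```python
# 2.0.0a2: provider plumbing lived under `clients` (deleted in this release).
# from mirascope.llm.clients import ModelId, Params, Provider

# 2.0.0a4: the decorator.py diff below imports the same role from `providers`.
from mirascope.llm.providers import ModelId, Params
```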
mirascope/llm/calls/calls.py
CHANGED
@@ -5,6 +5,7 @@ from typing import Generic, overload
 
 from ..context import Context, DepsT
 from ..formatting import FormattableT
+from ..models import Model, use_model
 from ..prompts import (
     AsyncContextPrompt,
     AsyncPrompt,
@@ -21,19 +22,37 @@ from ..responses import (
     Response,
     StreamResponse,
 )
-from ..tools import (
-    AsyncContextToolkit,
-    AsyncToolkit,
-    ContextToolkit,
-    Toolkit,
-)
 from ..types import P
-from .base_call import BaseCall
 
 
 @dataclass
-class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT]):
-    """
+class BaseCall:
+    """Base class for all Call types with shared model functionality."""
+
+    default_model: Model
+    """The default model that will be used if no model is set in context."""
+
+    @property
+    def model(self) -> Model:
+        """The model used for generating responses. May be overwritten via `with llm.model(...)`."""
+        return use_model(self.default_model)
+
+
+@dataclass
+class Call(BaseCall, Generic[P, FormattableT]):
+    """A call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating a `MessageTemplate` with `llm.call`. The decorated function
+    becomes directly callable to generate responses, with the `Model` bundled in.
+
+    A `Call` is essentially: `MessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `call(*args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using `with llm.model(...)` context manager.
+    """
+
+    prompt: Prompt[P, FormattableT]
+    """The underlying Prompt instance that generates messages with tools and format."""
 
     @overload
     def __call__(
@@ -63,10 +82,7 @@ class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT])
         self, *args: P.args, **kwargs: P.kwargs
     ) -> Response | Response[FormattableT]:
         """Generates a response using the LLM."""
-
-        return self.model.call(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.call(self.model, *args, **kwargs)
 
     @overload
     def stream(
@@ -82,18 +98,24 @@ class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT])
         self, *args: P.args, **kwargs: P.kwargs
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generates a streaming response using the LLM."""
-
-        return self.model.stream(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.stream(self.model, *args, **kwargs)
 
 
 @dataclass
-class AsyncCall(
-
-
-
-
+class AsyncCall(BaseCall, Generic[P, FormattableT]):
+    """An async call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating an async `MessageTemplate` with `llm.call`. The decorated async
+    function becomes directly callable to generate responses asynchronously, with the `Model` bundled in.
+
+    An `AsyncCall` is essentially: async `MessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `await call(*args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using `with llm.model(...)` context manager.
+    """
+
+    prompt: AsyncPrompt[P, FormattableT]
+    """The underlying AsyncPrompt instance that generates messages with tools and format."""
 
     @overload
     async def __call__(
@@ -108,7 +130,7 @@ class AsyncCall(
     async def __call__(
         self, *args: P.args, **kwargs: P.kwargs
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generates a
+        """Generates a response using the LLM asynchronously."""
         return await self.call(*args, **kwargs)
 
     @overload
@@ -125,10 +147,7 @@ class AsyncCall(
         self, *args: P.args, **kwargs: P.kwargs
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generates a response using the LLM asynchronously."""
-
-        return await self.model.call_async(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.call(self.model, *args, **kwargs)
 
     @overload
     async def stream(
@@ -144,18 +163,25 @@ class AsyncCall(
         self, *args: P.args, **kwargs: P.kwargs
     ) -> AsyncStreamResponse[FormattableT] | AsyncStreamResponse:
         """Generates a streaming response using the LLM asynchronously."""
-
-        return await self.model.stream_async(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.stream(self.model, *args, **kwargs)
 
 
 @dataclass
-class ContextCall(
-
-
-
-
+class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
+    """A context-aware call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating a `ContextMessageTemplate` with `llm.call`. The decorated function
+    (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
+    responses with context dependencies, with the `Model` bundled in.
+
+    A `ContextCall` is essentially: `ContextMessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `call(ctx, *args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using `with llm.model(...)` context manager.
+    """
+
+    prompt: ContextPrompt[P, DepsT, FormattableT]
+    """The underlying ContextPrompt instance that generates messages with tools and format."""
 
     @overload
     def __call__(
@@ -199,10 +225,7 @@ class ContextCall(
         self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generates a response using the LLM."""
-
-        return self.model.context_call(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.call(self.model, ctx, *args, **kwargs)
 
     @overload
     def stream(
@@ -226,18 +249,25 @@ class ContextCall(
         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
     ):
         """Generates a streaming response using the LLM."""
-
-        return self.model.context_stream(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.stream(self.model, ctx, *args, **kwargs)
 
 
 @dataclass
-class AsyncContextCall(
-
-
-
-
+class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
+    """An async context-aware call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating an async `ContextMessageTemplate` with `llm.call`. The decorated async
+    function (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
+    responses asynchronously with context dependencies, with the `Model` bundled in.
+
+    An `AsyncContextCall` is essentially: async `ContextMessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `await call(ctx, *args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using `with llm.model(...)` context manager.
+    """
+
+    prompt: AsyncContextPrompt[P, DepsT, FormattableT]
+    """The underlying AsyncContextPrompt instance that generates messages with tools and format."""
 
     @overload
     async def __call__(
@@ -281,10 +311,7 @@ class AsyncContextCall(
         self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generates a response using the LLM asynchronously."""
-
-        return await self.model.context_call_async(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.call(self.model, ctx, *args, **kwargs)
 
     @overload
     async def stream(
@@ -309,7 +336,4 @@ class AsyncContextCall(
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
         """Generates a streaming response using the LLM asynchronously."""
-
-        return await self.model.context_stream_async(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.stream(self.model, ctx, *args, **kwargs)
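The refactor above inverts the old flow: instead of each `Call` holding a toolkit and format and handing messages to the model, it now holds a `Prompt` and delegates to it, resolving the effective model through `use_model` so that a context-set model wins over `default_model`. A hedged usage sketch; the exact `llm.model(...)` signature is not shown in this diff, and the override model ID below is illustrative:

```python
from mirascope import llm

@llm.call("openai/gpt-4")
def recommend_book(genre: str):
    return f"Please recommend a book in {genre}."

# Invokes prompt.call(...) with the decorator's default model.
response = recommend_book("fantasy")

# BaseCall.model resolves via use_model, so a context-set model takes
# priority over default_model; assuming llm.model accepts a model ID.
with llm.model("anthropic/claude-sonnet-4"):  # illustrative ID
    response = recommend_book("fantasy")
```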
mirascope/llm/calls/decorator.py
CHANGED
@@ -4,29 +4,24 @@ from __future__ import annotations
 
 from collections.abc import Sequence
 from dataclasses import dataclass
-from typing import Generic,
+from typing import Generic, cast, overload
 from typing_extensions import Unpack
 
-from ..clients import (
-    AnthropicModelId,
-    GoogleModelId,
-    ModelId,
-    OpenAICompletionsModelId,
-    OpenAIResponsesModelId,
-    Params,
-    Provider,
-)
 from ..context import DepsT
 from ..formatting import Format, FormattableT
 from ..models import Model
 from ..prompts import (
-
-
-
-
-
-
+    AsyncContextMessageTemplate,
+    AsyncContextPrompt,
+    AsyncMessageTemplate,
+    AsyncPrompt,
+    ContextMessageTemplate,
+    ContextPrompt,
+    MessageTemplate,
+    Prompt,
+    _utils,
 )
+from ..providers import ModelId, Params
 from ..tools import (
     AsyncContextTool,
     AsyncContextToolkit,
@@ -44,16 +39,32 @@ from .calls import AsyncCall, AsyncContextCall, Call, ContextCall
 
 @dataclass(kw_only=True)
 class CallDecorator(Generic[ToolT, FormattableT]):
-    """
+    """Decorator for converting a `MessageTemplate` into a `Call`.
+
+    Takes a raw prompt function that returns message content and wraps it with tools,
+    format, and a model to create a `Call` that can be invoked directly without needing
+    to pass a model argument.
+
+    The decorator automatically detects whether the function is async or context-aware
+    and creates the appropriate `Call` variant (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`).
+
+    Conceptually: `CallDecorator` = `PromptDecorator` + `Model`
+    Result: `Call` = `MessageTemplate` + tools + format + `Model`
+    """
 
     model: Model
+    """The default model to use with this call. May be overridden."""
+
     tools: Sequence[ToolT] | None
+    """The tools that are included in the prompt, if any."""
+
     format: type[FormattableT] | Format[FormattableT] | None
+    """The structured output format off the prompt, if any."""
 
     @overload
     def __call__(
         self: CallDecorator[AsyncTool | AsyncContextTool[DepsT], FormattableT],
-        fn:
+        fn: AsyncContextMessageTemplate[P, DepsT],
     ) -> AsyncContextCall[P, DepsT, FormattableT]:
         """Decorate an async context prompt into an AsyncContextCall."""
         ...
@@ -61,31 +72,31 @@ class CallDecorator(Generic[ToolT, FormattableT]):
     @overload
     def __call__(
         self: CallDecorator[Tool | ContextTool[DepsT], FormattableT],
-        fn:
+        fn: ContextMessageTemplate[P, DepsT],
     ) -> ContextCall[P, DepsT, FormattableT]:
         """Decorate a context prompt into a ContextCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[AsyncTool, FormattableT], fn:
+        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncMessageTemplate[P]
     ) -> AsyncCall[P, FormattableT]:
         """Decorate an async prompt into an AsyncCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[Tool, FormattableT], fn:
+        self: CallDecorator[Tool, FormattableT], fn: MessageTemplate[P]
     ) -> Call[P, FormattableT]:
         """Decorate a prompt into a Call."""
         ...
 
     def __call__(
         self,
-        fn:
-
-
-
+        fn: ContextMessageTemplate[P, DepsT]
+        | AsyncContextMessageTemplate[P, DepsT]
+        | MessageTemplate[P]
+        | AsyncMessageTemplate[P],
     ) -> (
         ContextCall[P, DepsT, FormattableT]
        | AsyncContextCall[P, DepsT, FormattableT]
@@ -93,123 +104,122 @@ class CallDecorator(Generic[ToolT, FormattableT]):
         | AsyncCall[P, FormattableT]
     ):
         """Decorates a prompt into a Call or ContextCall."""
-        is_context =
-        is_async =
+        is_context = _utils.is_context_promptable(fn)
+        is_async = _utils.is_async_promptable(fn)
 
         if is_context and is_async:
             tools = cast(
                 Sequence[AsyncTool | AsyncContextTool[DepsT]] | None, self.tools
             )
+            prompt = AsyncContextPrompt(
+                fn=fn,
+                toolkit=AsyncContextToolkit(tools=tools),
+                format=self.format,
+            )
             return AsyncContextCall(
-
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=AsyncContextToolkit(tools=tools),
             )
         elif is_context:
             tools = cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
+            prompt = ContextPrompt(
+                fn=fn,
+                toolkit=ContextToolkit(tools=tools),
+                format=self.format,
+            )
             return ContextCall(
-
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=ContextToolkit(tools=tools),
             )
         elif is_async:
             tools = cast(Sequence[AsyncTool] | None, self.tools)
+            prompt = AsyncPrompt(
+                fn=fn, toolkit=AsyncToolkit(tools=tools), format=self.format
+            )
             return AsyncCall(
-
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=AsyncToolkit(tools=tools),
             )
         else:
             tools = cast(Sequence[Tool] | None, self.tools)
+            prompt = Prompt(fn=fn, toolkit=Toolkit(tools=tools), format=self.format)
             return Call(
-
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=Toolkit(tools=tools),
             )
 
 
 @overload
 def call(
+    model: ModelId,
     *,
-
-    model_id: AnthropicModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """
-    ...
-
+    """Decorator for converting prompt functions into LLM calls.
 
-@overload
-def call(
-    *,
-    provider: Literal["google"],
-    model_id: GoogleModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using Google models."""
+    This overload accepts a model ID string and allows additional params.
+    """
     ...
 
 
 @overload
 def call(
+    model: Model,
     *,
-
-    model_id: OpenAICompletionsModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """
-    ...
-
+    """Decorator for converting prompt functions into LLM calls.
 
-@overload
-def call(
-    *,
-    provider: Literal["openai:responses", "openai"],
-    model_id: OpenAIResponsesModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using OpenAI models (Responses API)."""
-    ...
-
-
-@overload
-def call(
-    *,
-    provider: Provider,
-    model_id: ModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using a generic provider and model."""
+    This overload accepts a Model instance and does not allow additional params.
+    """
     ...
 
 
 def call(
+    model: ModelId | Model,
     *,
-
-    model_id: ModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
    format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """
-
-
-
-
+    """Decorates a `MessageTemplate` to create a `Call` that can be invoked directly.
+
+    The `llm.call` decorator is the most convenient way to use Mirascope. It transforms
+    a raw prompt function (that returns message content) into a `Call` object that bundles
+    the function with tools, format, and a model. The resulting `Call` can be invoked
+    directly to generate LLM responses without needing to pass a model argument.
+
+    The decorator automatically detects the function type:
+    - If the first parameter is named `'ctx'` with type `llm.Context[T]` (or a subclass thereof),
+      creates a `ContextCall`
+    - If the function is async, creates an `AsyncCall` or `AsyncContextCall`
+    - Otherwise, creates a regular `Call`
+
+    The model specified in the decorator can be overridden at runtime using the
+    `llm.model()` context manager. When overridden, the context model completely
+    replaces the decorated model, including all parameters.
+
+    Conceptual flow:
+    - `MessageTemplate`: raw function returning content
+    - `@llm.prompt`: `MessageTemplate` → `Prompt`
+      Includes tools and format, if applicable. Can be called by providing a `Model`.
+    - `@llm.call`: `MessageTemplate` → `Call`. Includes a model, tools, and format. The
+      model may be created on the fly from a model identifier and optional params, or
+      provided outright.
+
+    Args:
+        model: A model ID string (e.g., "openai/gpt-4") or a `Model` instance
+        tools: Optional `Sequence` of tools to make available to the LLM
+        format: Optional response format class (`BaseModel`) or Format instance
+        **params: Additional call parameters (temperature, max_tokens, etc.)
+            Only available when passing a model ID string
+
+    Returns:
+        A `CallDecorator` that converts prompt functions into `Call` variants
+        (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`)
 
     Example:
 
@@ -217,15 +227,12 @@ def call(
         ```python
         from mirascope import llm
 
-        @llm.call(
-
-
-        )
-        def answer_question(question: str) -> str:
-            return f"Answer this question: {question}"
+        @llm.call("openai/gpt-4")
+        def recommend_book(genre: str):
+            return f"Please recommend a book in {genre}."
 
-        response: llm.Response =
-        print(response)
+        response: llm.Response = recommend_book("fantasy")
+        print(response.pretty())
         ```
 
     Example:
@@ -236,20 +243,19 @@ def call(
         from mirascope import llm
 
         @dataclass
-        class
-
-
-
-
-
-
-
-
-
-
-        response = answer_question(ctx, "What is the capital of France?")
-        print(response)
+        class User:
+            name: str
+            age: int
+
+        @llm.call("openai/gpt-4")
+        def recommend_book(ctx: llm.Context[User], genre: str):
+            return f"Recommend a {genre} book for {ctx.deps.name}, age {ctx.deps.age}."
+
+        ctx = llm.Context(deps=User(name="Alice", age=15))
+        response = recommend_book(ctx, "fantasy")
+        print(response.pretty())
         ```
     """
-
+    if isinstance(model, str):
+        model = Model(model, **params)
     return CallDecorator(model=model, tools=tools, format=format)
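Taken together, the removed overloads show the migration this release asks of callers: the keyword pair `provider=...`/`model_id=...` collapses into one positional `model` argument, either an ID string (with `**params` forwarded to `Model(model, **params)`) or a pre-built `Model` instance via the second overload. A before/after sketch; the `temperature` param name is taken from the docstring's examples and is otherwise an assumption:

```python
from mirascope import llm

# 2.0.0a2 (removed keyword overloads):
# @llm.call(provider="google", model_id=..., tools=None)

# 2.0.0a4: a single model ID string, with params used to build the Model.
@llm.call("openai/gpt-4", temperature=0.7)  # assumed param name
def recommend_book(genre: str):
    return f"Please recommend a book in {genre}."
```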
mirascope/llm/content/__init__.py
CHANGED
@@ -2,6 +2,7 @@
 
 from typing import TypeAlias
 
+from ..types import Jsonable
 from .audio import Audio, Base64AudioSource
 from .document import (
     Base64DocumentSource,
@@ -16,11 +17,11 @@ from .tool_call import ToolCall, ToolCallChunk, ToolCallEndChunk, ToolCallStartC
 from .tool_output import ToolOutput
 
 ContentPart: TypeAlias = (
-    Text | Image | Audio | Document | ToolOutput | ToolCall | Thought
+    Text | Image | Audio | Document | ToolOutput[Jsonable] | ToolCall | Thought
 )
 """Content parts that may be included in a Message."""
 
-UserContentPart: TypeAlias = Text | Image | Audio | Document | ToolOutput
+UserContentPart: TypeAlias = Text | Image | Audio | Document | ToolOutput[Jsonable]
 """Content parts that can be included in a UserMessage."""
 
 AssistantContentPart: TypeAlias = Text | ToolCall | Thought
mirascope/llm/context/_utils.py
CHANGED
@@ -1,11 +1,12 @@
 import inspect
+import typing
 from collections.abc import Callable
-from typing import get_origin
+from typing import Any, get_origin
 
 from .context import Context
 
 
-def first_param_is_context(fn: Callable) -> bool:
+def first_param_is_context(fn: Callable[..., Any]) -> bool:
     """Returns whether the first argument to a function is `ctx: Context`.
 
     Also returns true if the first argument is a subclass of `Context`.
@@ -21,8 +22,20 @@ def first_param_is_context(fn: Callable) -> bool:
     else:
         first_param = params[0]
 
-
-
-
+    if first_param.name != "ctx":
+        return False
+
+    try:
+        hints = typing.get_type_hints(fn)
+        annotation = hints.get(first_param.name)
+    except (NameError, AttributeError, TypeError):
+        annotation = first_param.annotation
+
+    if annotation is None or annotation is inspect.Parameter.empty:
+        return False
+
+    type_is_context = get_origin(annotation) is Context
+    subclass_of_context = isinstance(annotation, type) and issubclass(
+        annotation, Context
     )
-    return
+    return type_is_context or subclass_of_context
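The practical effect of this rewrite: detection now requires the parameter to be named `ctx`, and annotations are resolved through `typing.get_type_hints`, which handles the string annotations produced under `from __future__ import annotations` (where `first_param.annotation` is a plain string that `get_origin` cannot inspect), falling back to the raw annotation on resolution errors. Expected outcomes, sketched against the new logic:

```python
from mirascope import llm

# True: first parameter is named "ctx" and annotated as a Context.
def with_ctx(ctx: llm.Context[str], question: str): ...

# False: the parameter must be named exactly "ctx".
def other_name(context: llm.Context[str], question: str): ...

# False: an unannotated first parameter is never treated as a Context.
def untyped(ctx, question: str): ...
```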
|