mirascope 2.0.0a6__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/api/_generated/__init__.py +186 -5
- mirascope/api/_generated/annotations/client.py +38 -6
- mirascope/api/_generated/annotations/raw_client.py +366 -47
- mirascope/api/_generated/annotations/types/annotations_create_response.py +19 -6
- mirascope/api/_generated/annotations/types/annotations_get_response.py +19 -6
- mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +22 -7
- mirascope/api/_generated/annotations/types/annotations_update_response.py +19 -6
- mirascope/api/_generated/api_keys/__init__.py +12 -2
- mirascope/api/_generated/api_keys/client.py +107 -6
- mirascope/api/_generated/api_keys/raw_client.py +486 -38
- mirascope/api/_generated/api_keys/types/__init__.py +7 -1
- mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
- mirascope/api/_generated/client.py +36 -0
- mirascope/api/_generated/docs/raw_client.py +71 -9
- mirascope/api/_generated/environment.py +3 -3
- mirascope/api/_generated/environments/__init__.py +6 -0
- mirascope/api/_generated/environments/client.py +158 -9
- mirascope/api/_generated/environments/raw_client.py +620 -52
- mirascope/api/_generated/environments/types/__init__.py +10 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
- mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
- mirascope/api/_generated/{organizations/types/organizations_credits_response.py → environments/types/environments_get_analytics_response_top_models_item.py} +6 -3
- mirascope/api/_generated/errors/__init__.py +6 -0
- mirascope/api/_generated/errors/bad_request_error.py +5 -2
- mirascope/api/_generated/errors/conflict_error.py +5 -2
- mirascope/api/_generated/errors/payment_required_error.py +15 -0
- mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
- mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
- mirascope/api/_generated/functions/__init__.py +10 -0
- mirascope/api/_generated/functions/client.py +222 -8
- mirascope/api/_generated/functions/raw_client.py +975 -134
- mirascope/api/_generated/functions/types/__init__.py +28 -4
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
- mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
- mirascope/api/_generated/health/raw_client.py +74 -10
- mirascope/api/_generated/organization_invitations/__init__.py +33 -0
- mirascope/api/_generated/organization_invitations/client.py +546 -0
- mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
- mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
- mirascope/api/_generated/organization_memberships/__init__.py +19 -0
- mirascope/api/_generated/organization_memberships/client.py +302 -0
- mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
- mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
- mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/organizations/__init__.py +26 -2
- mirascope/api/_generated/organizations/client.py +442 -20
- mirascope/api/_generated/organizations/raw_client.py +1763 -164
- mirascope/api/_generated/organizations/types/__init__.py +48 -2
- mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
- mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
- mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
- mirascope/api/_generated/project_memberships/__init__.py +25 -0
- mirascope/api/_generated/project_memberships/client.py +437 -0
- mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
- mirascope/api/_generated/projects/raw_client.py +415 -58
- mirascope/api/_generated/reference.md +2767 -397
- mirascope/api/_generated/tags/__init__.py +19 -0
- mirascope/api/_generated/tags/client.py +504 -0
- mirascope/api/_generated/tags/raw_client.py +1288 -0
- mirascope/api/_generated/tags/types/__init__.py +17 -0
- mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
- mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
- mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
- mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
- mirascope/api/_generated/token_cost/__init__.py +7 -0
- mirascope/api/_generated/token_cost/client.py +160 -0
- mirascope/api/_generated/token_cost/raw_client.py +264 -0
- mirascope/api/_generated/token_cost/types/__init__.py +8 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
- mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
- mirascope/api/_generated/traces/__init__.py +20 -0
- mirascope/api/_generated/traces/client.py +543 -0
- mirascope/api/_generated/traces/raw_client.py +1366 -96
- mirascope/api/_generated/traces/types/__init__.py +28 -0
- mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +6 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
- mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -2
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
- mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
- mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
- mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +10 -1
- mirascope/api/_generated/types/__init__.py +32 -2
- mirascope/api/_generated/types/bad_request_error_body.py +50 -0
- mirascope/api/_generated/types/date.py +3 -0
- mirascope/api/_generated/types/immutable_resource_error.py +22 -0
- mirascope/api/_generated/types/internal_server_error_body.py +3 -3
- mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
- mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
- mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
- mirascope/api/_generated/types/rate_limit_error.py +31 -0
- mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
- mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
- mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
- mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
- mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
- mirascope/api/settings.py +19 -1
- mirascope/llm/__init__.py +53 -10
- mirascope/llm/calls/__init__.py +2 -1
- mirascope/llm/calls/calls.py +3 -1
- mirascope/llm/calls/decorator.py +21 -7
- mirascope/llm/content/tool_output.py +22 -5
- mirascope/llm/exceptions.py +284 -71
- mirascope/llm/formatting/__init__.py +17 -0
- mirascope/llm/formatting/format.py +112 -35
- mirascope/llm/formatting/output_parser.py +178 -0
- mirascope/llm/formatting/partial.py +80 -7
- mirascope/llm/formatting/primitives.py +192 -0
- mirascope/llm/formatting/types.py +20 -8
- mirascope/llm/messages/__init__.py +3 -0
- mirascope/llm/messages/_utils.py +34 -0
- mirascope/llm/models/__init__.py +5 -0
- mirascope/llm/models/models.py +137 -69
- mirascope/llm/{providers/base → models}/params.py +7 -57
- mirascope/llm/models/thinking_config.py +61 -0
- mirascope/llm/prompts/_utils.py +0 -32
- mirascope/llm/prompts/decorator.py +16 -5
- mirascope/llm/prompts/prompts.py +131 -68
- mirascope/llm/providers/__init__.py +1 -4
- mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +18 -9
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +62 -13
- mirascope/llm/providers/anthropic/_utils/decode.py +18 -9
- mirascope/llm/providers/anthropic/_utils/encode.py +26 -7
- mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
- mirascope/llm/providers/anthropic/beta_provider.py +64 -18
- mirascope/llm/providers/anthropic/provider.py +91 -33
- mirascope/llm/providers/base/__init__.py +0 -4
- mirascope/llm/providers/base/_utils.py +55 -6
- mirascope/llm/providers/base/base_provider.py +116 -37
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +20 -7
- mirascope/llm/providers/google/_utils/encode.py +26 -7
- mirascope/llm/providers/google/_utils/errors.py +3 -2
- mirascope/llm/providers/google/provider.py +64 -18
- mirascope/llm/providers/mirascope/_utils.py +13 -17
- mirascope/llm/providers/mirascope/provider.py +49 -18
- mirascope/llm/providers/mlx/_utils.py +7 -2
- mirascope/llm/providers/mlx/encoding/base.py +5 -2
- mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
- mirascope/llm/providers/mlx/mlx.py +23 -6
- mirascope/llm/providers/mlx/provider.py +42 -13
- mirascope/llm/providers/openai/_utils/errors.py +2 -2
- mirascope/llm/providers/openai/completions/_utils/encode.py +20 -16
- mirascope/llm/providers/openai/completions/base_provider.py +40 -11
- mirascope/llm/providers/openai/provider.py +40 -10
- mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
- mirascope/llm/providers/openai/responses/_utils/decode.py +19 -6
- mirascope/llm/providers/openai/responses/_utils/encode.py +22 -10
- mirascope/llm/providers/openai/responses/provider.py +56 -18
- mirascope/llm/providers/provider_registry.py +93 -19
- mirascope/llm/responses/__init__.py +6 -1
- mirascope/llm/responses/_utils.py +102 -12
- mirascope/llm/responses/base_response.py +5 -2
- mirascope/llm/responses/base_stream_response.py +115 -25
- mirascope/llm/responses/response.py +2 -1
- mirascope/llm/responses/root_response.py +89 -17
- mirascope/llm/responses/stream_response.py +6 -9
- mirascope/llm/tools/decorator.py +9 -4
- mirascope/llm/tools/tool_schema.py +12 -6
- mirascope/llm/tools/toolkit.py +35 -27
- mirascope/llm/tools/tools.py +45 -20
- mirascope/ops/__init__.py +4 -0
- mirascope/ops/_internal/configuration.py +82 -31
- mirascope/ops/_internal/exporters/exporters.py +64 -11
- mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
- mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
- mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1242
- mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
- mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
- mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
- mirascope/ops/_internal/protocols.py +83 -1
- mirascope/ops/_internal/traced_calls.py +4 -0
- mirascope/ops/_internal/traced_functions.py +118 -8
- mirascope/ops/_internal/tracing.py +78 -1
- mirascope/ops/_internal/utils.py +52 -4
- {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/METADATA +12 -11
- mirascope-2.0.1.dist-info/RECORD +423 -0
- {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
- mirascope-2.0.0a6.dist-info/RECORD +0 -316
- {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0
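Most of the new surface area is the Fern-generated REST client under `mirascope/api/_generated/`: whole new resource groups (organization invitations and memberships, project memberships, tags, token cost, environment analytics, trace search) plus typed error bodies for payment-required, rate-limit, plan-limit-exceeded, subscription-past-due, and service-unavailable responses. Judging purely from the file names, each resource group follows the same generated layout; the sketch below is a hypothetical orientation aid, with every class and attribute name inferred from Fern's usual conventions rather than confirmed by this diff:

```python
# Hypothetical layout of one generated resource group (tags/), inferred
# from the file names in the diff and typical Fern output -- treat all
# names below as assumptions, not confirmed API.
#
# mirascope/api/_generated/tags/
#   client.py      # high-level sync/async wrappers over raw_client.py
#   raw_client.py  # one HTTP method per endpoint, returning typed bodies
#   types/
#     tags_create_response.py  # e.g. a `TagsCreateResponse` pydantic model
#     tags_list_response.py    # e.g. `TagsListResponse` with a `tags` list
#     tags_update_response.py
```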
mirascope/llm/models/models.py
CHANGED
@@ -9,11 +9,10 @@ from typing import overload
 from typing_extensions import Unpack
 
 from ..context import Context, DepsT
-from ..formatting import Format, FormattableT
-from ..messages import Message, UserContent
+from ..formatting import Format, FormattableT, OutputParser
+from ..messages import Message, UserContent, promote_to_messages
 from ..providers import (
     ModelId,
-    Params,
     Provider,
     ProviderId,
     get_provider_for_model,
@@ -38,6 +37,7 @@ from ..tools import (
     Tool,
     Toolkit,
 )
+from .params import Params
 
 MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
 
@@ -67,8 +67,7 @@ class Model:
     def recommend_book(genre: str) -> llm.Response:
         # Uses context model if available, otherwise creates default
         model = llm.use_model("openai/gpt-5-mini")
-
-        return model.call(messages=[message])
+        return model.call(f"Please recommend a book in {genre}.")
 
     # Uses default model
     response = recommend_book("fantasy")
@@ -86,8 +85,7 @@ class Model:
     def recommend_book(genre: str) -> llm.Response:
         # Hardcoded model, cannot be overridden by context
         model = llm.Model("openai/gpt-5-mini")
-
-        return model.call(messages=[message])
+        return model.call(f"Please recommend a book in {genre}.")
     ```
     """
 
@@ -156,8 +154,8 @@ class Model:
     @overload
     def call(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
         format: None = None,
     ) -> Response:
@@ -167,8 +165,8 @@ class Model:
     @overload
     def call(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
         format: type[FormattableT] | Format[FormattableT],
     ) -> Response[FormattableT]:
@@ -178,31 +176,40 @@ class Model:
     @overload
     def call(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` with an optional response format."""
         ...
 
     def call(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling this model's LLM provider.
 
         Args:
-
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.Response` object containing the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return self.provider.call(
             model_id=self.model_id,
             messages=messages,
@@ -214,8 +221,8 @@ class Model:
     @overload
     async def call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: None = None,
     ) -> AsyncResponse:
@@ -225,8 +232,8 @@ class Model:
     @overload
     async def call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: type[FormattableT] | Format[FormattableT],
     ) -> AsyncResponse[FormattableT]:
@@ -236,31 +243,40 @@ class Model:
     @overload
     async def call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` with an optional response format."""
         ...
 
     async def call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling this model's LLM provider.
 
         Args:
-
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.AsyncResponse` object containing the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return await self.provider.call_async(
             model_id=self.model_id,
             messages=messages,
@@ -272,8 +288,8 @@ class Model:
     @overload
     def stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
         format: None = None,
     ) -> StreamResponse:
@@ -283,8 +299,8 @@ class Model:
     @overload
     def stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
         format: type[FormattableT] | Format[FormattableT],
     ) -> StreamResponse[FormattableT]:
@@ -294,31 +310,40 @@ class Model:
     @overload
     def stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Stream an `llm.StreamResponse` with an optional response format."""
         ...
 
     def stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from this model's LLM provider.
 
         Args:
-
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.StreamResponse` object for iterating over the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return self.provider.stream(
             model_id=self.model_id,
             messages=messages,
@@ -330,8 +355,8 @@ class Model:
     @overload
     async def stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: None = None,
     ) -> AsyncStreamResponse:
@@ -341,8 +366,8 @@ class Model:
    @overload
    async def stream_async(
        self,
+        content: UserContent | Sequence[Message],
        *,
-        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT],
    ) -> AsyncStreamResponse[FormattableT]:
@@ -352,31 +377,40 @@ class Model:
     @overload
     async def stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Stream an `llm.AsyncStreamResponse` with an optional response format."""
         ...
 
     async def stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this model's LLM provider.
 
         Args:
-
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return await self.provider.stream_async(
             model_id=self.model_id,
             messages=messages,
@@ -388,9 +422,9 @@ class Model:
     @overload
     def context_call(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
@@ -402,9 +436,9 @@ class Model:
     @overload
     def context_call(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
@@ -416,38 +450,47 @@ class Model:
     @overload
     def context_call(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` with an optional response format."""
         ...
 
     def context_call(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling this model's LLM provider.
 
         Args:
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             ctx: Context object with dependencies for tools.
-            messages: Messages to send to the LLM.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.ContextResponse` object containing the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return self.provider.context_call(
             ctx=ctx,
             model_id=self.model_id,
@@ -460,9 +503,9 @@ class Model:
     @overload
     async def context_call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -474,9 +517,9 @@ class Model:
     @overload
     async def context_call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -488,38 +531,47 @@ class Model:
     @overload
     async def context_call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` with an optional response format."""
         ...
 
     async def context_call_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling this model's LLM provider.
 
         Args:
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             ctx: Context object with dependencies for tools.
-            messages: Messages to send to the LLM.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.AsyncContextResponse` object containing the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return await self.provider.context_call_async(
             ctx=ctx,
             model_id=self.model_id,
@@ -532,9 +584,9 @@ class Model:
     @overload
     def context_stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
@@ -546,9 +598,9 @@ class Model:
     @overload
     def context_stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
@@ -560,13 +612,16 @@ class Model:
     @overload
     def context_stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> (
         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
     ):
@@ -575,27 +630,33 @@ class Model:
 
     def context_stream(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> (
         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
     ):
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from this model's LLM provider.
 
         Args:
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             ctx: Context object with dependencies for tools.
-            messages: Messages to send to the LLM.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return self.provider.context_stream(
             ctx=ctx,
             model_id=self.model_id,
@@ -608,9 +669,9 @@ class Model:
     @overload
     async def context_stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -622,9 +683,9 @@ class Model:
     @overload
     async def context_stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
@@ -636,13 +697,16 @@ class Model:
     @overload
     async def context_stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None,
     ) -> (
         AsyncContextStreamResponse[DepsT, None]
         | AsyncContextStreamResponse[DepsT, FormattableT]
@@ -652,13 +716,16 @@ class Model:
 
     async def context_stream_async(
         self,
+        content: UserContent | Sequence[Message],
         *,
         ctx: Context[DepsT],
-        messages: Sequence[Message],
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
     ) -> (
         AsyncContextStreamResponse[DepsT, None]
         | AsyncContextStreamResponse[DepsT, FormattableT]
@@ -666,14 +733,17 @@ class Model:
         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this model's LLM provider.
 
         Args:
+            content: Content to send to the LLM. Can be a string (converted to user
+                message), UserContent, a sequence of UserContent, or a sequence of
+                Messages for full control.
             ctx: Context object with dependencies for tools.
-            messages: Messages to send to the LLM.
             tools: Optional tools that the model may invoke.
             format: Optional response format specifier.
 
         Returns:
             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
         """
+        messages = promote_to_messages(content)
         return await self.provider.context_stream_async(
             ctx=ctx,
             model_id=self.model_id,
@@ -1206,7 +1276,7 @@ def model(
     ```python
     m = llm.model("openai/gpt-4o")
     # Use directly
-    response = m.call(
+    response = m.call("Hello!")
     # Or use as context manager
     with m:
         response = recommend_book("fantasy")
@@ -1234,8 +1304,7 @@ def model(
 
     def recommend_book(genre: str) -> llm.Response:
         model = llm.use_model("openai/gpt-5-mini")
-
-        return model.call(messages=[message])
+        return model.call(f"Please recommend a book in {genre}.")
 
     # Override the default model at runtime
     with llm.model("anthropic/claude-sonnet-4-5"):
@@ -1267,7 +1336,7 @@ def model(
     m = llm.model("openai/gpt-4o")
 
     # Use it directly
-    response = m.call(
+    response = m.call("Hello!")
 
     # Or use it as a context manager
     with m:
@@ -1332,8 +1401,7 @@ def use_model(
 
     def recommend_book(genre: str) -> llm.Response:
         model = llm.use_model("openai/gpt-5-mini")
-
-        return model.call(messages=[message])
+        return model.call(f"Please recommend a book in {genre}.")
 
     # Uses the default model (gpt-5-mini)
     response = recommend_book("fantasy")