mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl
This diff shows the changes between two publicly available package versions released to one of the supported registries. The information it contains is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- mirascope/__init__.py +2 -2
- mirascope/api/__init__.py +6 -0
- mirascope/api/_generated/README.md +207 -0
- mirascope/api/_generated/__init__.py +141 -0
- mirascope/api/_generated/client.py +163 -0
- mirascope/api/_generated/core/__init__.py +52 -0
- mirascope/api/_generated/core/api_error.py +23 -0
- mirascope/api/_generated/core/client_wrapper.py +58 -0
- mirascope/api/_generated/core/datetime_utils.py +30 -0
- mirascope/api/_generated/core/file.py +70 -0
- mirascope/api/_generated/core/force_multipart.py +16 -0
- mirascope/api/_generated/core/http_client.py +619 -0
- mirascope/api/_generated/core/http_response.py +55 -0
- mirascope/api/_generated/core/jsonable_encoder.py +102 -0
- mirascope/api/_generated/core/pydantic_utilities.py +310 -0
- mirascope/api/_generated/core/query_encoder.py +60 -0
- mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
- mirascope/api/_generated/core/request_options.py +35 -0
- mirascope/api/_generated/core/serialization.py +282 -0
- mirascope/api/_generated/docs/__init__.py +4 -0
- mirascope/api/_generated/docs/client.py +95 -0
- mirascope/api/_generated/docs/raw_client.py +132 -0
- mirascope/api/_generated/environment.py +9 -0
- mirascope/api/_generated/errors/__init__.py +17 -0
- mirascope/api/_generated/errors/bad_request_error.py +15 -0
- mirascope/api/_generated/errors/conflict_error.py +15 -0
- mirascope/api/_generated/errors/forbidden_error.py +15 -0
- mirascope/api/_generated/errors/internal_server_error.py +15 -0
- mirascope/api/_generated/errors/not_found_error.py +15 -0
- mirascope/api/_generated/health/__init__.py +7 -0
- mirascope/api/_generated/health/client.py +96 -0
- mirascope/api/_generated/health/raw_client.py +129 -0
- mirascope/api/_generated/health/types/__init__.py +8 -0
- mirascope/api/_generated/health/types/health_check_response.py +24 -0
- mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
- mirascope/api/_generated/organizations/__init__.py +25 -0
- mirascope/api/_generated/organizations/client.py +380 -0
- mirascope/api/_generated/organizations/raw_client.py +876 -0
- mirascope/api/_generated/organizations/types/__init__.py +23 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +17 -0
- mirascope/api/_generated/projects/client.py +458 -0
- mirascope/api/_generated/projects/raw_client.py +1016 -0
- mirascope/api/_generated/projects/types/__init__.py +15 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
- mirascope/api/_generated/reference.md +753 -0
- mirascope/api/_generated/traces/__init__.py +55 -0
- mirascope/api/_generated/traces/client.py +162 -0
- mirascope/api/_generated/traces/raw_client.py +168 -0
- mirascope/api/_generated/traces/types/__init__.py +95 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
- mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
- mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
- mirascope/api/_generated/types/__init__.py +37 -0
- mirascope/api/_generated/types/already_exists_error.py +24 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/database_error.py +24 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/http_api_decode_error.py +29 -0
- mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
- mirascope/api/_generated/types/issue.py +40 -0
- mirascope/api/_generated/types/issue_tag.py +17 -0
- mirascope/api/_generated/types/not_found_error_body.py +24 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/permission_denied_error.py +24 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
- mirascope/api/_generated/types/property_key.py +7 -0
- mirascope/api/_generated/types/property_key_key.py +27 -0
- mirascope/api/_generated/types/property_key_key_tag.py +5 -0
- mirascope/api/client.py +255 -0
- mirascope/api/settings.py +81 -0
- mirascope/llm/__init__.py +45 -11
- mirascope/llm/calls/calls.py +81 -57
- mirascope/llm/calls/decorator.py +121 -115
- mirascope/llm/content/__init__.py +3 -2
- mirascope/llm/context/_utils.py +19 -6
- mirascope/llm/exceptions.py +30 -16
- mirascope/llm/formatting/_utils.py +9 -5
- mirascope/llm/formatting/format.py +2 -2
- mirascope/llm/formatting/from_call_args.py +2 -2
- mirascope/llm/messages/message.py +13 -5
- mirascope/llm/models/__init__.py +2 -2
- mirascope/llm/models/models.py +189 -81
- mirascope/llm/prompts/__init__.py +13 -12
- mirascope/llm/prompts/_utils.py +27 -24
- mirascope/llm/prompts/decorator.py +133 -204
- mirascope/llm/prompts/prompts.py +424 -0
- mirascope/llm/prompts/protocols.py +25 -59
- mirascope/llm/providers/__init__.py +44 -0
- mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
- mirascope/llm/providers/anthropic/__init__.py +29 -0
- mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
- mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
- mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
- mirascope/llm/providers/anthropic/beta_provider.py +322 -0
- mirascope/llm/providers/anthropic/model_id.py +23 -0
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +416 -0
- mirascope/llm/{clients → providers}/base/__init__.py +3 -3
- mirascope/llm/{clients → providers}/base/_utils.py +25 -8
- mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
- mirascope/llm/providers/google/__init__.py +21 -0
- mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
- mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
- mirascope/llm/providers/google/model_id.py +22 -0
- mirascope/llm/providers/google/model_info.py +62 -0
- mirascope/llm/providers/google/provider.py +442 -0
- mirascope/llm/providers/load_provider.py +54 -0
- mirascope/llm/providers/mlx/__init__.py +24 -0
- mirascope/llm/providers/mlx/_utils.py +129 -0
- mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
- mirascope/llm/providers/mlx/encoding/base.py +69 -0
- mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
- mirascope/llm/providers/mlx/mlx.py +237 -0
- mirascope/llm/providers/mlx/model_id.py +17 -0
- mirascope/llm/providers/mlx/provider.py +415 -0
- mirascope/llm/providers/model_id.py +16 -0
- mirascope/llm/providers/ollama/__init__.py +19 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +6 -0
- mirascope/llm/providers/openai/completions/__init__.py +25 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
- mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
- mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
- mirascope/llm/providers/openai/completions/base_provider.py +513 -0
- mirascope/llm/providers/openai/completions/provider.py +22 -0
- mirascope/llm/providers/openai/model_id.py +31 -0
- mirascope/llm/providers/openai/model_info.py +303 -0
- mirascope/llm/providers/openai/provider.py +398 -0
- mirascope/llm/providers/openai/responses/__init__.py +21 -0
- mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
- mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
- mirascope/llm/providers/openai/responses/provider.py +469 -0
- mirascope/llm/providers/provider_id.py +23 -0
- mirascope/llm/providers/provider_registry.py +169 -0
- mirascope/llm/providers/together/__init__.py +19 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +3 -0
- mirascope/llm/responses/base_response.py +14 -5
- mirascope/llm/responses/base_stream_response.py +35 -6
- mirascope/llm/responses/finish_reason.py +1 -0
- mirascope/llm/responses/response.py +33 -13
- mirascope/llm/responses/root_response.py +12 -13
- mirascope/llm/responses/stream_response.py +35 -23
- mirascope/llm/responses/usage.py +95 -0
- mirascope/llm/tools/__init__.py +9 -2
- mirascope/llm/tools/_utils.py +12 -3
- mirascope/llm/tools/protocols.py +4 -4
- mirascope/llm/tools/tool_schema.py +44 -9
- mirascope/llm/tools/tools.py +10 -9
- mirascope/ops/__init__.py +156 -0
- mirascope/ops/_internal/__init__.py +5 -0
- mirascope/ops/_internal/closure.py +1118 -0
- mirascope/ops/_internal/configuration.py +126 -0
- mirascope/ops/_internal/context.py +76 -0
- mirascope/ops/_internal/exporters/__init__.py +26 -0
- mirascope/ops/_internal/exporters/exporters.py +342 -0
- mirascope/ops/_internal/exporters/processors.py +104 -0
- mirascope/ops/_internal/exporters/types.py +165 -0
- mirascope/ops/_internal/exporters/utils.py +29 -0
- mirascope/ops/_internal/instrumentation/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
- mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
- mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
- mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
- mirascope/ops/_internal/propagation.py +198 -0
- mirascope/ops/_internal/protocols.py +51 -0
- mirascope/ops/_internal/session.py +139 -0
- mirascope/ops/_internal/spans.py +232 -0
- mirascope/ops/_internal/traced_calls.py +371 -0
- mirascope/ops/_internal/traced_functions.py +394 -0
- mirascope/ops/_internal/tracing.py +276 -0
- mirascope/ops/_internal/types.py +13 -0
- mirascope/ops/_internal/utils.py +75 -0
- mirascope/ops/_internal/versioned_calls.py +512 -0
- mirascope/ops/_internal/versioned_functions.py +346 -0
- mirascope/ops/_internal/versioning.py +303 -0
- mirascope/ops/exceptions.py +21 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
- mirascope-2.0.0a4.dist-info/RECORD +247 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
- mirascope/graphs/__init__.py +0 -22
- mirascope/graphs/finite_state_machine.py +0 -625
- mirascope/llm/agents/__init__.py +0 -15
- mirascope/llm/agents/agent.py +0 -97
- mirascope/llm/agents/agent_template.py +0 -45
- mirascope/llm/agents/decorator.py +0 -176
- mirascope/llm/calls/base_call.py +0 -33
- mirascope/llm/clients/__init__.py +0 -34
- mirascope/llm/clients/anthropic/__init__.py +0 -25
- mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
- mirascope/llm/clients/anthropic/clients.py +0 -819
- mirascope/llm/clients/anthropic/model_ids.py +0 -8
- mirascope/llm/clients/google/__init__.py +0 -20
- mirascope/llm/clients/google/clients.py +0 -853
- mirascope/llm/clients/google/model_ids.py +0 -15
- mirascope/llm/clients/openai/__init__.py +0 -25
- mirascope/llm/clients/openai/completions/__init__.py +0 -28
- mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
- mirascope/llm/clients/openai/completions/clients.py +0 -833
- mirascope/llm/clients/openai/completions/model_ids.py +0 -8
- mirascope/llm/clients/openai/responses/__init__.py +0 -26
- mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
- mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
- mirascope/llm/clients/openai/responses/clients.py +0 -832
- mirascope/llm/clients/openai/responses/model_ids.py +0 -8
- mirascope/llm/clients/openai/shared/__init__.py +0 -7
- mirascope/llm/clients/openai/shared/_utils.py +0 -55
- mirascope/llm/clients/providers.py +0 -175
- mirascope-2.0.0a2.dist-info/RECORD +0 -102
- /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
- /mirascope/llm/{clients → providers}/base/params.py +0 -0
- /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
- /mirascope/llm/{clients → providers}/google/message.py +0 -0
- /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
- {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/clients/openai/completions/clients.py (deleted)
@@ -1,833 +0,0 @@
-"""OpenAI client implementation."""
-
-import os
-from collections.abc import Sequence
-from contextvars import ContextVar
-from functools import lru_cache
-from typing import overload
-from typing_extensions import Unpack
-
-from openai import AsyncOpenAI, OpenAI
-
-from ....context import Context, DepsT
-from ....formatting import Format, FormattableT
-from ....messages import Message
-from ....responses import (
-    AsyncContextResponse,
-    AsyncContextStreamResponse,
-    AsyncResponse,
-    AsyncStreamResponse,
-    ContextResponse,
-    ContextStreamResponse,
-    Response,
-    StreamResponse,
-)
-from ....tools import (
-    AsyncContextTool,
-    AsyncContextToolkit,
-    AsyncTool,
-    AsyncToolkit,
-    ContextTool,
-    ContextToolkit,
-    Tool,
-    Toolkit,
-)
-from ...base import BaseClient, Params
-from . import _utils
-from .model_ids import OpenAICompletionsModelId
-
-OPENAI_COMPLETIONS_CLIENT_CONTEXT: ContextVar["OpenAICompletionsClient | None"] = (
-    ContextVar("OPENAI_COMPLETIONS_CLIENT_CONTEXT", default=None)
-)
-
-
-@lru_cache(maxsize=256)
-def _openai_singleton(
-    api_key: str | None, base_url: str | None
-) -> "OpenAICompletionsClient":
-    """Return a cached OpenAI client instance for the given parameters."""
-    return OpenAICompletionsClient(api_key=api_key, base_url=base_url)
-
-
-def client(
-    *, api_key: str | None = None, base_url: str | None = None
-) -> "OpenAICompletionsClient":
-    """Create or retrieve an OpenAI client with the given parameters.
-
-    If a client has already been created with these parameters, it will be
-    retrieved from cache and returned.
-
-    Args:
-        api_key: API key for authentication. If None, uses OPENAI_API_KEY env var.
-        base_url: Base URL for the API. If None, uses OPENAI_BASE_URL env var.
-
-    Returns:
-        An OpenAI client instance.
-    """
-    api_key = api_key or os.getenv("OPENAI_API_KEY")
-    base_url = base_url or os.getenv("OPENAI_BASE_URL")
-    return _openai_singleton(api_key, base_url)
-
-
-def get_client() -> "OpenAICompletionsClient":
-    """Retrieve the current OpenAI client from context, or a global default.
-
-    Returns:
-        The current OpenAI client from context if available, otherwise
-        a global default client based on environment variables.
-    """
-    ctx_client = OPENAI_COMPLETIONS_CLIENT_CONTEXT.get()
-    return ctx_client or client()
-
-
-class OpenAICompletionsClient(BaseClient[OpenAICompletionsModelId, OpenAI]):
-    """The client for the OpenAI LLM model."""
-
-    @property
-    def _context_var(self) -> ContextVar["OpenAICompletionsClient | None"]:
-        return OPENAI_COMPLETIONS_CLIENT_CONTEXT
-
-    def __init__(
-        self, *, api_key: str | None = None, base_url: str | None = None
-    ) -> None:
-        """Initialize the OpenAI client."""
-        self.client = OpenAI(api_key=api_key, base_url=base_url)
-        self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> Response:
-        """Generate an `llm.Response` without a response format."""
-        ...
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> Response[FormattableT]:
-        """Generate an `llm.Response` with a response format."""
-        ...
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` with an optional response format."""
-        ...
-
-    def call(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` by synchronously calling the OpenAI ChatCompletions API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.Response` object containing the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_response = self.client.chat.completions.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return Response(
-            raw=openai_response,
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, None]:
-        """Generate an `llm.ContextResponse` without a response format."""
-        ...
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` with a response format."""
-        ...
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` with an optional response format."""
-        ...
-
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` by synchronously calling the OpenAI ChatCompletions API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.ContextResponse` object containing the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_response = self.client.chat.completions.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return ContextResponse(
-            raw=openai_response,
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse:
-        """Generate an `llm.AsyncResponse` without a response format."""
-        ...
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` with a response format."""
-        ...
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` with an optional response format."""
-        ...
-
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI ChatCompletions API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncResponse` object containing the LLM-generated content.
-        """
-
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            params=params,
-            messages=messages,
-            tools=tools,
-            format=format,
-        )
-
-        openai_response = await self.async_client.chat.completions.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return AsyncResponse(
-            raw=openai_response,
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, None]:
-        """Generate an `llm.AsyncContextResponse` without a response format."""
-        ...
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` with a response format."""
-        ...
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` with an optional response format."""
-        ...
-
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI ChatCompletions API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncContextResponse` object containing the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            params=params,
-            messages=messages,
-            tools=tools,
-            format=format,
-        )
-
-        openai_response = await self.async_client.chat.completions.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return AsyncContextResponse(
-            raw=openai_response,
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse:
-        """Stream an `llm.StreamResponse` without a response format."""
-        ...
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> StreamResponse[FormattableT]:
-        """Stream an `llm.StreamResponse` with a response format."""
-        ...
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Stream an `llm.StreamResponse` with an optional response format."""
-        ...
-
-    def stream(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI ChatCompletions API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.StreamResponse` object for iterating over the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = self.client.chat.completions.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_stream(openai_stream)
-
-        return StreamResponse(
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT]:
-        """Stream an `llm.ContextStreamResponse` without a response format."""
-        ...
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.ContextStreamResponse` with a response format."""
-        ...
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.ContextStreamResponse` with an optional response format."""
-        ...
-
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI ChatCompletions API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = self.client.chat.completions.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_stream(openai_stream)
-
-        return ContextStreamResponse(
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse:
-        """Stream an `llm.AsyncStreamResponse` without a response format."""
-        ...
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse[FormattableT]:
-        """Stream an `llm.AsyncStreamResponse` with a response format."""
-        ...
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Stream an `llm.AsyncStreamResponse` with an optional response format."""
-        ...
-
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI ChatCompletions API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
-        """
-
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = await self.async_client.chat.completions.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_async_stream(openai_stream)
-
-        return AsyncStreamResponse(
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse[DepsT]:
-        """Stream an `llm.AsyncContextStreamResponse` without a response format."""
-        ...
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.AsyncContextStreamResponse` with a response format."""
-        ...
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None,
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-        """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
-        ...
-
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAICompletionsModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI ChatCompletions API.
-
-        Args:
-            ctx: Context object with dependencies for tools.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = await self.async_client.chat.completions.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_async_stream(openai_stream)
-
-        return AsyncContextStreamResponse(
-            provider="openai:completions",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=input_messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )