mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
@@ -4,10 +4,8 @@ from __future__ import annotations
4
4
 
5
5
  from abc import ABC, abstractmethod
6
6
  from collections.abc import Sequence
7
- from contextvars import ContextVar, Token
8
- from types import TracebackType
9
- from typing import Generic, overload
10
- from typing_extensions import Self, TypeVar, Unpack
7
+ from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeAlias, overload
8
+ from typing_extensions import TypeVar, Unpack
11
9
 
12
10
  from ...context import Context, DepsT
13
11
  from ...formatting import Format, FormattableT
@@ -34,51 +32,40 @@ from ...tools import (
34
32
  )
35
33
  from .params import Params
36
34
 
37
- ModelIdT = TypeVar("ModelIdT", bound=str)
35
+ if TYPE_CHECKING:
36
+ from ..provider_id import ProviderId
37
+
38
38
  ProviderClientT = TypeVar("ProviderClientT")
39
39
 
40
- ClientT = TypeVar("ClientT", bound="BaseClient")
41
- """Type variable for an LLM client."""
40
+ Provider: TypeAlias = "BaseProvider[Any]"
41
+ """Type alias for `BaseProvider` with any client type."""
42
42
 
43
43
 
44
- class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
45
- """Base abstract client for provider-specific implementations.
44
+ class BaseProvider(Generic[ProviderClientT], ABC):
45
+ """Base abstract provider for LLM interactions.
46
46
 
47
47
  This class defines explicit methods for each type of call, eliminating
48
48
  the need for complex overloads in provider implementations.
49
49
  """
50
50
 
51
- client: ProviderClientT
52
- _token: Token | None = None
51
+ id: ClassVar[ProviderId]
52
+ """Provider identifier (e.g., "anthropic", "openai")."""
53
53
 
54
- @property
55
- @abstractmethod
56
- def _context_var(self) -> ContextVar:
57
- """The ContextVar for this client type."""
58
- ...
54
+ default_scope: ClassVar[str | list[str]]
55
+ """Default scope(s) for this provider when explicitly registered.
59
56
 
60
- def __enter__(self) -> Self:
61
- """Sets the client context and stores the token."""
62
- self._token = self._context_var.set(self)
63
- return self
57
+ Can be a single scope string or a list of scopes. For example:
58
+ - "anthropic/" - Single scope
59
+ - ["anthropic/", "openai/"] - Multiple scopes (e.g., for AWS Bedrock)
60
+ """
64
61
 
65
- def __exit__(
66
- self,
67
- exc_type: type[BaseException] | None,
68
- exc_val: BaseException | None,
69
- exc_tb: TracebackType | None,
70
- ) -> None:
71
- """Restores the client context to the token from the last setting."""
72
- if self._token is not None:
73
- self._context_var.reset(self._token)
74
- self._token = None
62
+ client: ProviderClientT
75
63
 
76
64
  @overload
77
- @abstractmethod
78
65
  def call(
79
66
  self,
80
67
  *,
81
- model_id: ModelIdT,
68
+ model_id: str,
82
69
  messages: Sequence[Message],
83
70
  tools: Sequence[Tool] | Toolkit | None = None,
84
71
  format: None = None,
@@ -88,11 +75,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
88
75
  ...
89
76
 
90
77
  @overload
91
- @abstractmethod
92
78
  def call(
93
79
  self,
94
80
  *,
95
- model_id: ModelIdT,
81
+ model_id: str,
96
82
  messages: Sequence[Message],
97
83
  tools: Sequence[Tool] | Toolkit | None = None,
98
84
  format: type[FormattableT] | Format[FormattableT],
@@ -102,11 +88,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
102
88
  ...
103
89
 
104
90
  @overload
105
- @abstractmethod
106
91
  def call(
107
92
  self,
108
93
  *,
109
- model_id: ModelIdT,
94
+ model_id: str,
110
95
  messages: Sequence[Message],
111
96
  tools: Sequence[Tool] | Toolkit | None = None,
112
97
  format: type[FormattableT] | Format[FormattableT] | None,
@@ -115,11 +100,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
115
100
  """Generate an `llm.Response` with an optional response format."""
116
101
  ...
117
102
 
118
- @abstractmethod
119
103
  def call(
120
104
  self,
121
105
  *,
122
- model_id: ModelIdT,
106
+ model_id: str,
123
107
  messages: Sequence[Message],
124
108
  tools: Sequence[Tool] | Toolkit | None = None,
125
109
  format: type[FormattableT] | Format[FormattableT] | None = None,
@@ -137,15 +121,33 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
137
121
  Returns:
138
122
  An `llm.Response` object containing the LLM-generated content.
139
123
  """
124
+ return self._call(
125
+ model_id=model_id,
126
+ messages=messages,
127
+ tools=tools,
128
+ format=format,
129
+ **params,
130
+ )
131
+
132
+ @abstractmethod
133
+ def _call(
134
+ self,
135
+ *,
136
+ model_id: str,
137
+ messages: Sequence[Message],
138
+ tools: Sequence[Tool] | Toolkit | None = None,
139
+ format: type[FormattableT] | Format[FormattableT] | None = None,
140
+ **params: Unpack[Params],
141
+ ) -> Response | Response[FormattableT]:
142
+ """Implementation for call(). Subclasses override this method."""
140
143
  ...
141
144
 
142
145
  @overload
143
- @abstractmethod
144
146
  def context_call(
145
147
  self,
146
148
  *,
147
149
  ctx: Context[DepsT],
148
- model_id: ModelIdT,
150
+ model_id: str,
149
151
  messages: Sequence[Message],
150
152
  tools: Sequence[Tool | ContextTool[DepsT]]
151
153
  | ContextToolkit[DepsT]
@@ -157,12 +159,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
157
159
  ...
158
160
 
159
161
  @overload
160
- @abstractmethod
161
162
  def context_call(
162
163
  self,
163
164
  *,
164
165
  ctx: Context[DepsT],
165
- model_id: ModelIdT,
166
+ model_id: str,
166
167
  messages: Sequence[Message],
167
168
  tools: Sequence[Tool | ContextTool[DepsT]]
168
169
  | ContextToolkit[DepsT]
@@ -174,12 +175,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
174
175
  ...
175
176
 
176
177
  @overload
177
- @abstractmethod
178
178
  def context_call(
179
179
  self,
180
180
  *,
181
181
  ctx: Context[DepsT],
182
- model_id: ModelIdT,
182
+ model_id: str,
183
183
  messages: Sequence[Message],
184
184
  tools: Sequence[Tool | ContextTool[DepsT]]
185
185
  | ContextToolkit[DepsT]
@@ -190,12 +190,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
190
190
  """Generate an `llm.ContextResponse` with an optional response format."""
191
191
  ...
192
192
 
193
- @abstractmethod
194
193
  def context_call(
195
194
  self,
196
195
  *,
197
196
  ctx: Context[DepsT],
198
- model_id: ModelIdT,
197
+ model_id: str,
199
198
  messages: Sequence[Message],
200
199
  tools: Sequence[Tool | ContextTool[DepsT]]
201
200
  | ContextToolkit[DepsT]
@@ -216,14 +215,36 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
216
215
  Returns:
217
216
  An `llm.ContextResponse` object containing the LLM-generated content.
218
217
  """
218
+ return self._context_call(
219
+ ctx=ctx,
220
+ model_id=model_id,
221
+ messages=messages,
222
+ tools=tools,
223
+ format=format,
224
+ **params,
225
+ )
226
+
227
+ @abstractmethod
228
+ def _context_call(
229
+ self,
230
+ *,
231
+ ctx: Context[DepsT],
232
+ model_id: str,
233
+ messages: Sequence[Message],
234
+ tools: Sequence[Tool | ContextTool[DepsT]]
235
+ | ContextToolkit[DepsT]
236
+ | None = None,
237
+ format: type[FormattableT] | Format[FormattableT] | None = None,
238
+ **params: Unpack[Params],
239
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
240
+ """Implementation for context_call(). Subclasses override this method."""
219
241
  ...
220
242
 
221
243
  @overload
222
- @abstractmethod
223
244
  async def call_async(
224
245
  self,
225
246
  *,
226
- model_id: ModelIdT,
247
+ model_id: str,
227
248
  messages: Sequence[Message],
228
249
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
229
250
  format: None = None,
@@ -233,11 +254,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
233
254
  ...
234
255
 
235
256
  @overload
236
- @abstractmethod
237
257
  async def call_async(
238
258
  self,
239
259
  *,
240
- model_id: ModelIdT,
260
+ model_id: str,
241
261
  messages: Sequence[Message],
242
262
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
243
263
  format: type[FormattableT] | Format[FormattableT],
@@ -247,11 +267,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
247
267
  ...
248
268
 
249
269
  @overload
250
- @abstractmethod
251
270
  async def call_async(
252
271
  self,
253
272
  *,
254
- model_id: ModelIdT,
273
+ model_id: str,
255
274
  messages: Sequence[Message],
256
275
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
257
276
  format: type[FormattableT] | Format[FormattableT] | None,
@@ -260,11 +279,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
260
279
  """Generate an `llm.AsyncResponse` with an optional response format."""
261
280
  ...
262
281
 
263
- @abstractmethod
264
282
  async def call_async(
265
283
  self,
266
284
  *,
267
- model_id: ModelIdT,
285
+ model_id: str,
268
286
  messages: Sequence[Message],
269
287
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
270
288
  format: type[FormattableT] | Format[FormattableT] | None = None,
@@ -282,15 +300,33 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
282
300
  Returns:
283
301
  An `llm.AsyncResponse` object containing the LLM-generated content.
284
302
  """
303
+ return await self._call_async(
304
+ model_id=model_id,
305
+ messages=messages,
306
+ tools=tools,
307
+ format=format,
308
+ **params,
309
+ )
310
+
311
+ @abstractmethod
312
+ async def _call_async(
313
+ self,
314
+ *,
315
+ model_id: str,
316
+ messages: Sequence[Message],
317
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
318
+ format: type[FormattableT] | Format[FormattableT] | None = None,
319
+ **params: Unpack[Params],
320
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
321
+ """Implementation for call_async(). Subclasses override this method."""
285
322
  ...
286
323
 
287
324
  @overload
288
- @abstractmethod
289
325
  async def context_call_async(
290
326
  self,
291
327
  *,
292
328
  ctx: Context[DepsT],
293
- model_id: ModelIdT,
329
+ model_id: str,
294
330
  messages: Sequence[Message],
295
331
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
296
332
  | AsyncContextToolkit[DepsT]
@@ -302,12 +338,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
302
338
  ...
303
339
 
304
340
  @overload
305
- @abstractmethod
306
341
  async def context_call_async(
307
342
  self,
308
343
  *,
309
344
  ctx: Context[DepsT],
310
- model_id: ModelIdT,
345
+ model_id: str,
311
346
  messages: Sequence[Message],
312
347
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
313
348
  | AsyncContextToolkit[DepsT]
@@ -319,12 +354,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
319
354
  ...
320
355
 
321
356
  @overload
322
- @abstractmethod
323
357
  async def context_call_async(
324
358
  self,
325
359
  *,
326
360
  ctx: Context[DepsT],
327
- model_id: ModelIdT,
361
+ model_id: str,
328
362
  messages: Sequence[Message],
329
363
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
330
364
  | AsyncContextToolkit[DepsT]
@@ -335,12 +369,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
335
369
  """Generate an `llm.AsyncContextResponse` with an optional response format."""
336
370
  ...
337
371
 
338
- @abstractmethod
339
372
  async def context_call_async(
340
373
  self,
341
374
  *,
342
375
  ctx: Context[DepsT],
343
- model_id: ModelIdT,
376
+ model_id: str,
344
377
  messages: Sequence[Message],
345
378
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
346
379
  | AsyncContextToolkit[DepsT]
@@ -361,14 +394,36 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
361
394
  Returns:
362
395
  An `llm.AsyncContextResponse` object containing the LLM-generated content.
363
396
  """
397
+ return await self._context_call_async(
398
+ ctx=ctx,
399
+ model_id=model_id,
400
+ messages=messages,
401
+ tools=tools,
402
+ format=format,
403
+ **params,
404
+ )
405
+
406
+ @abstractmethod
407
+ async def _context_call_async(
408
+ self,
409
+ *,
410
+ ctx: Context[DepsT],
411
+ model_id: str,
412
+ messages: Sequence[Message],
413
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
414
+ | AsyncContextToolkit[DepsT]
415
+ | None = None,
416
+ format: type[FormattableT] | Format[FormattableT] | None = None,
417
+ **params: Unpack[Params],
418
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
419
+ """Implementation for context_call_async(). Subclasses override this method."""
364
420
  ...
365
421
 
366
422
  @overload
367
- @abstractmethod
368
423
  def stream(
369
424
  self,
370
425
  *,
371
- model_id: ModelIdT,
426
+ model_id: str,
372
427
  messages: Sequence[Message],
373
428
  tools: Sequence[Tool] | Toolkit | None = None,
374
429
  format: None = None,
@@ -378,11 +433,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
378
433
  ...
379
434
 
380
435
  @overload
381
- @abstractmethod
382
436
  def stream(
383
437
  self,
384
438
  *,
385
- model_id: ModelIdT,
439
+ model_id: str,
386
440
  messages: Sequence[Message],
387
441
  tools: Sequence[Tool] | Toolkit | None = None,
388
442
  format: type[FormattableT] | Format[FormattableT],
@@ -392,11 +446,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
392
446
  ...
393
447
 
394
448
  @overload
395
- @abstractmethod
396
449
  def stream(
397
450
  self,
398
451
  *,
399
- model_id: ModelIdT,
452
+ model_id: str,
400
453
  messages: Sequence[Message],
401
454
  tools: Sequence[Tool] | Toolkit | None = None,
402
455
  format: type[FormattableT] | Format[FormattableT] | None,
@@ -405,11 +458,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
405
458
  """Stream an `llm.StreamResponse` with an optional response format."""
406
459
  ...
407
460
 
408
- @abstractmethod
409
461
  def stream(
410
462
  self,
411
463
  *,
412
- model_id: ModelIdT,
464
+ model_id: str,
413
465
  messages: Sequence[Message],
414
466
  tools: Sequence[Tool] | Toolkit | None = None,
415
467
  format: type[FormattableT] | Format[FormattableT] | None = None,
@@ -427,15 +479,33 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
427
479
  Returns:
428
480
  An `llm.StreamResponse` object for iterating over the LLM-generated content.
429
481
  """
482
+ return self._stream(
483
+ model_id=model_id,
484
+ messages=messages,
485
+ tools=tools,
486
+ format=format,
487
+ **params,
488
+ )
489
+
490
+ @abstractmethod
491
+ def _stream(
492
+ self,
493
+ *,
494
+ model_id: str,
495
+ messages: Sequence[Message],
496
+ tools: Sequence[Tool] | Toolkit | None = None,
497
+ format: type[FormattableT] | Format[FormattableT] | None = None,
498
+ **params: Unpack[Params],
499
+ ) -> StreamResponse | StreamResponse[FormattableT]:
500
+ """Implementation for stream(). Subclasses override this method."""
430
501
  ...
431
502
 
432
503
  @overload
433
- @abstractmethod
434
504
  def context_stream(
435
505
  self,
436
506
  *,
437
507
  ctx: Context[DepsT],
438
- model_id: ModelIdT,
508
+ model_id: str,
439
509
  messages: Sequence[Message],
440
510
  tools: Sequence[Tool | ContextTool[DepsT]]
441
511
  | ContextToolkit[DepsT]
@@ -447,12 +517,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
447
517
  ...
448
518
 
449
519
  @overload
450
- @abstractmethod
451
520
  def context_stream(
452
521
  self,
453
522
  *,
454
523
  ctx: Context[DepsT],
455
- model_id: ModelIdT,
524
+ model_id: str,
456
525
  messages: Sequence[Message],
457
526
  tools: Sequence[Tool | ContextTool[DepsT]]
458
527
  | ContextToolkit[DepsT]
@@ -464,12 +533,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
464
533
  ...
465
534
 
466
535
  @overload
467
- @abstractmethod
468
536
  def context_stream(
469
537
  self,
470
538
  *,
471
539
  ctx: Context[DepsT],
472
- model_id: ModelIdT,
540
+ model_id: str,
473
541
  messages: Sequence[Message],
474
542
  tools: Sequence[Tool | ContextTool[DepsT]]
475
543
  | ContextToolkit[DepsT]
@@ -482,12 +550,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
482
550
  """Stream an `llm.ContextStreamResponse` with an optional response format."""
483
551
  ...
484
552
 
485
- @abstractmethod
486
553
  def context_stream(
487
554
  self,
488
555
  *,
489
556
  ctx: Context[DepsT],
490
- model_id: ModelIdT,
557
+ model_id: str,
491
558
  messages: Sequence[Message],
492
559
  tools: Sequence[Tool | ContextTool[DepsT]]
493
560
  | ContextToolkit[DepsT]
@@ -510,14 +577,38 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
510
577
  Returns:
511
578
  An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
512
579
  """
580
+ return self._context_stream(
581
+ ctx=ctx,
582
+ model_id=model_id,
583
+ messages=messages,
584
+ tools=tools,
585
+ format=format,
586
+ **params,
587
+ )
588
+
589
+ @abstractmethod
590
+ def _context_stream(
591
+ self,
592
+ *,
593
+ ctx: Context[DepsT],
594
+ model_id: str,
595
+ messages: Sequence[Message],
596
+ tools: Sequence[Tool | ContextTool[DepsT]]
597
+ | ContextToolkit[DepsT]
598
+ | None = None,
599
+ format: type[FormattableT] | Format[FormattableT] | None = None,
600
+ **params: Unpack[Params],
601
+ ) -> (
602
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
603
+ ):
604
+ """Implementation for context_stream(). Subclasses override this method."""
513
605
  ...
514
606
 
515
607
  @overload
516
- @abstractmethod
517
608
  async def stream_async(
518
609
  self,
519
610
  *,
520
- model_id: ModelIdT,
611
+ model_id: str,
521
612
  messages: Sequence[Message],
522
613
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
523
614
  format: None = None,
@@ -527,11 +618,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
527
618
  ...
528
619
 
529
620
  @overload
530
- @abstractmethod
531
621
  async def stream_async(
532
622
  self,
533
623
  *,
534
- model_id: ModelIdT,
624
+ model_id: str,
535
625
  messages: Sequence[Message],
536
626
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
537
627
  format: type[FormattableT] | Format[FormattableT],
@@ -541,11 +631,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
541
631
  ...
542
632
 
543
633
  @overload
544
- @abstractmethod
545
634
  async def stream_async(
546
635
  self,
547
636
  *,
548
- model_id: ModelIdT,
637
+ model_id: str,
549
638
  messages: Sequence[Message],
550
639
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
551
640
  format: type[FormattableT] | Format[FormattableT] | None,
@@ -554,11 +643,10 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
554
643
  """Stream an `llm.AsyncStreamResponse` with an optional response format."""
555
644
  ...
556
645
 
557
- @abstractmethod
558
646
  async def stream_async(
559
647
  self,
560
648
  *,
561
- model_id: ModelIdT,
649
+ model_id: str,
562
650
  messages: Sequence[Message],
563
651
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
564
652
  format: type[FormattableT] | Format[FormattableT] | None = None,
@@ -576,15 +664,33 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
576
664
  Returns:
577
665
  An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
578
666
  """
667
+ return await self._stream_async(
668
+ model_id=model_id,
669
+ messages=messages,
670
+ tools=tools,
671
+ format=format,
672
+ **params,
673
+ )
674
+
675
+ @abstractmethod
676
+ async def _stream_async(
677
+ self,
678
+ *,
679
+ model_id: str,
680
+ messages: Sequence[Message],
681
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
682
+ format: type[FormattableT] | Format[FormattableT] | None = None,
683
+ **params: Unpack[Params],
684
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
685
+ """Implementation for stream_async(). Subclasses override this method."""
579
686
  ...
580
687
 
581
688
  @overload
582
- @abstractmethod
583
689
  async def context_stream_async(
584
690
  self,
585
691
  *,
586
692
  ctx: Context[DepsT],
587
- model_id: ModelIdT,
693
+ model_id: str,
588
694
  messages: Sequence[Message],
589
695
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
590
696
  | AsyncContextToolkit[DepsT]
@@ -596,12 +702,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
596
702
  ...
597
703
 
598
704
  @overload
599
- @abstractmethod
600
705
  async def context_stream_async(
601
706
  self,
602
707
  *,
603
708
  ctx: Context[DepsT],
604
- model_id: ModelIdT,
709
+ model_id: str,
605
710
  messages: Sequence[Message],
606
711
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
607
712
  | AsyncContextToolkit[DepsT]
@@ -613,12 +718,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
613
718
  ...
614
719
 
615
720
  @overload
616
- @abstractmethod
617
721
  async def context_stream_async(
618
722
  self,
619
723
  *,
620
724
  ctx: Context[DepsT],
621
- model_id: ModelIdT,
725
+ model_id: str,
622
726
  messages: Sequence[Message],
623
727
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
624
728
  | AsyncContextToolkit[DepsT]
@@ -632,12 +736,11 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
632
736
  """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
633
737
  ...
634
738
 
635
- @abstractmethod
636
739
  async def context_stream_async(
637
740
  self,
638
741
  *,
639
742
  ctx: Context[DepsT],
640
- model_id: ModelIdT,
743
+ model_id: str,
641
744
  messages: Sequence[Message],
642
745
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
643
746
  | AsyncContextToolkit[DepsT]
@@ -661,13 +764,39 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
661
764
  Returns:
662
765
  An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
663
766
  """
767
+ return await self._context_stream_async(
768
+ ctx=ctx,
769
+ model_id=model_id,
770
+ messages=messages,
771
+ tools=tools,
772
+ format=format,
773
+ **params,
774
+ )
775
+
776
+ @abstractmethod
777
+ async def _context_stream_async(
778
+ self,
779
+ *,
780
+ ctx: Context[DepsT],
781
+ model_id: str,
782
+ messages: Sequence[Message],
783
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
784
+ | AsyncContextToolkit[DepsT]
785
+ | None = None,
786
+ format: type[FormattableT] | Format[FormattableT] | None = None,
787
+ **params: Unpack[Params],
788
+ ) -> (
789
+ AsyncContextStreamResponse[DepsT, None]
790
+ | AsyncContextStreamResponse[DepsT, FormattableT]
791
+ ):
792
+ """Implementation for context_stream_async(). Subclasses override this method."""
664
793
  ...
665
794
 
666
795
  @overload
667
796
  def resume(
668
797
  self,
669
798
  *,
670
- model_id: ModelIdT,
799
+ model_id: str,
671
800
  response: Response,
672
801
  content: UserContent,
673
802
  **params: Unpack[Params],
@@ -679,7 +808,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
679
808
  def resume(
680
809
  self,
681
810
  *,
682
- model_id: ModelIdT,
811
+ model_id: str,
683
812
  response: Response[FormattableT],
684
813
  content: UserContent,
685
814
  **params: Unpack[Params],
@@ -691,7 +820,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
691
820
  def resume(
692
821
  self,
693
822
  *,
694
- model_id: ModelIdT,
823
+ model_id: str,
695
824
  response: Response | Response[FormattableT],
696
825
  content: UserContent,
697
826
  **params: Unpack[Params],
@@ -702,7 +831,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
702
831
  def resume(
703
832
  self,
704
833
  *,
705
- model_id: ModelIdT,
834
+ model_id: str,
706
835
  response: Response | Response[FormattableT],
707
836
  content: UserContent,
708
837
  **params: Unpack[Params],
@@ -736,7 +865,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
736
865
  async def resume_async(
737
866
  self,
738
867
  *,
739
- model_id: ModelIdT,
868
+ model_id: str,
740
869
  response: AsyncResponse,
741
870
  content: UserContent,
742
871
  **params: Unpack[Params],
@@ -748,7 +877,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
748
877
  async def resume_async(
749
878
  self,
750
879
  *,
751
- model_id: ModelIdT,
880
+ model_id: str,
752
881
  response: AsyncResponse[FormattableT],
753
882
  content: UserContent,
754
883
  **params: Unpack[Params],
@@ -760,7 +889,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
760
889
  async def resume_async(
761
890
  self,
762
891
  *,
763
- model_id: ModelIdT,
892
+ model_id: str,
764
893
  response: AsyncResponse | AsyncResponse[FormattableT],
765
894
  content: UserContent,
766
895
  **params: Unpack[Params],
@@ -771,7 +900,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
771
900
  async def resume_async(
772
901
  self,
773
902
  *,
774
- model_id: ModelIdT,
903
+ model_id: str,
775
904
  response: AsyncResponse | AsyncResponse[FormattableT],
776
905
  content: UserContent,
777
906
  **params: Unpack[Params],
@@ -806,7 +935,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
806
935
  self,
807
936
  *,
808
937
  ctx: Context[DepsT],
809
- model_id: ModelIdT,
938
+ model_id: str,
810
939
  response: ContextResponse[DepsT, None],
811
940
  content: UserContent,
812
941
  **params: Unpack[Params],
@@ -819,7 +948,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
819
948
  self,
820
949
  *,
821
950
  ctx: Context[DepsT],
822
- model_id: ModelIdT,
951
+ model_id: str,
823
952
  response: ContextResponse[DepsT, FormattableT],
824
953
  content: UserContent,
825
954
  **params: Unpack[Params],
@@ -832,7 +961,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
832
961
  self,
833
962
  *,
834
963
  ctx: Context[DepsT],
835
- model_id: ModelIdT,
964
+ model_id: str,
836
965
  response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
837
966
  content: UserContent,
838
967
  **params: Unpack[Params],
@@ -844,7 +973,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
844
973
  self,
845
974
  *,
846
975
  ctx: Context[DepsT],
847
- model_id: ModelIdT,
976
+ model_id: str,
848
977
  response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
849
978
  content: UserContent,
850
979
  **params: Unpack[Params],
@@ -881,7 +1010,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
881
1010
  self,
882
1011
  *,
883
1012
  ctx: Context[DepsT],
884
- model_id: ModelIdT,
1013
+ model_id: str,
885
1014
  response: AsyncContextResponse[DepsT, None],
886
1015
  content: UserContent,
887
1016
  **params: Unpack[Params],
@@ -894,7 +1023,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
894
1023
  self,
895
1024
  *,
896
1025
  ctx: Context[DepsT],
897
- model_id: ModelIdT,
1026
+ model_id: str,
898
1027
  response: AsyncContextResponse[DepsT, FormattableT],
899
1028
  content: UserContent,
900
1029
  **params: Unpack[Params],
@@ -907,7 +1036,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
907
1036
  self,
908
1037
  *,
909
1038
  ctx: Context[DepsT],
910
- model_id: ModelIdT,
1039
+ model_id: str,
911
1040
  response: AsyncContextResponse[DepsT, None]
912
1041
  | AsyncContextResponse[DepsT, FormattableT],
913
1042
  content: UserContent,
@@ -920,7 +1049,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
920
1049
  self,
921
1050
  *,
922
1051
  ctx: Context[DepsT],
923
- model_id: ModelIdT,
1052
+ model_id: str,
924
1053
  response: AsyncContextResponse[DepsT, None]
925
1054
  | AsyncContextResponse[DepsT, FormattableT],
926
1055
  content: UserContent,
@@ -957,7 +1086,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
957
1086
  def resume_stream(
958
1087
  self,
959
1088
  *,
960
- model_id: ModelIdT,
1089
+ model_id: str,
961
1090
  response: StreamResponse,
962
1091
  content: UserContent,
963
1092
  **params: Unpack[Params],
@@ -969,7 +1098,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
969
1098
  def resume_stream(
970
1099
  self,
971
1100
  *,
972
- model_id: ModelIdT,
1101
+ model_id: str,
973
1102
  response: StreamResponse[FormattableT],
974
1103
  content: UserContent,
975
1104
  **params: Unpack[Params],
@@ -981,7 +1110,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
981
1110
  def resume_stream(
982
1111
  self,
983
1112
  *,
984
- model_id: ModelIdT,
1113
+ model_id: str,
985
1114
  response: StreamResponse | StreamResponse[FormattableT],
986
1115
  content: UserContent,
987
1116
  **params: Unpack[Params],
@@ -992,7 +1121,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
992
1121
  def resume_stream(
993
1122
  self,
994
1123
  *,
995
- model_id: ModelIdT,
1124
+ model_id: str,
996
1125
  response: StreamResponse | StreamResponse[FormattableT],
997
1126
  content: UserContent,
998
1127
  **params: Unpack[Params],
@@ -1026,7 +1155,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1026
1155
  async def resume_stream_async(
1027
1156
  self,
1028
1157
  *,
1029
- model_id: ModelIdT,
1158
+ model_id: str,
1030
1159
  response: AsyncStreamResponse,
1031
1160
  content: UserContent,
1032
1161
  **params: Unpack[Params],
@@ -1038,7 +1167,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1038
1167
  async def resume_stream_async(
1039
1168
  self,
1040
1169
  *,
1041
- model_id: ModelIdT,
1170
+ model_id: str,
1042
1171
  response: AsyncStreamResponse[FormattableT],
1043
1172
  content: UserContent,
1044
1173
  **params: Unpack[Params],
@@ -1050,7 +1179,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1050
1179
  async def resume_stream_async(
1051
1180
  self,
1052
1181
  *,
1053
- model_id: ModelIdT,
1182
+ model_id: str,
1054
1183
  response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
1055
1184
  content: UserContent,
1056
1185
  **params: Unpack[Params],
@@ -1061,7 +1190,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1061
1190
  async def resume_stream_async(
1062
1191
  self,
1063
1192
  *,
1064
- model_id: ModelIdT,
1193
+ model_id: str,
1065
1194
  response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
1066
1195
  content: UserContent,
1067
1196
  **params: Unpack[Params],
@@ -1096,7 +1225,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1096
1225
  self,
1097
1226
  *,
1098
1227
  ctx: Context[DepsT],
1099
- model_id: ModelIdT,
1228
+ model_id: str,
1100
1229
  response: ContextStreamResponse[DepsT, None],
1101
1230
  content: UserContent,
1102
1231
  **params: Unpack[Params],
@@ -1109,7 +1238,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1109
1238
  self,
1110
1239
  *,
1111
1240
  ctx: Context[DepsT],
1112
- model_id: ModelIdT,
1241
+ model_id: str,
1113
1242
  response: ContextStreamResponse[DepsT, FormattableT],
1114
1243
  content: UserContent,
1115
1244
  **params: Unpack[Params],
@@ -1122,7 +1251,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1122
1251
  self,
1123
1252
  *,
1124
1253
  ctx: Context[DepsT],
1125
- model_id: ModelIdT,
1254
+ model_id: str,
1126
1255
  response: ContextStreamResponse[DepsT, None]
1127
1256
  | ContextStreamResponse[DepsT, FormattableT],
1128
1257
  content: UserContent,
@@ -1137,7 +1266,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1137
1266
  self,
1138
1267
  *,
1139
1268
  ctx: Context[DepsT],
1140
- model_id: ModelIdT,
1269
+ model_id: str,
1141
1270
  response: ContextStreamResponse[DepsT, None]
1142
1271
  | ContextStreamResponse[DepsT, FormattableT],
1143
1272
  content: UserContent,
@@ -1177,7 +1306,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1177
1306
  self,
1178
1307
  *,
1179
1308
  ctx: Context[DepsT],
1180
- model_id: ModelIdT,
1309
+ model_id: str,
1181
1310
  response: AsyncContextStreamResponse[DepsT, None],
1182
1311
  content: UserContent,
1183
1312
  **params: Unpack[Params],
@@ -1190,7 +1319,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1190
1319
  self,
1191
1320
  *,
1192
1321
  ctx: Context[DepsT],
1193
- model_id: ModelIdT,
1322
+ model_id: str,
1194
1323
  response: AsyncContextStreamResponse[DepsT, FormattableT],
1195
1324
  content: UserContent,
1196
1325
  **params: Unpack[Params],
@@ -1203,7 +1332,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1203
1332
  self,
1204
1333
  *,
1205
1334
  ctx: Context[DepsT],
1206
- model_id: ModelIdT,
1335
+ model_id: str,
1207
1336
  response: AsyncContextStreamResponse[DepsT, None]
1208
1337
  | AsyncContextStreamResponse[DepsT, FormattableT],
1209
1338
  content: UserContent,
@@ -1219,7 +1348,7 @@ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
1219
1348
  self,
1220
1349
  *,
1221
1350
  ctx: Context[DepsT],
1222
- model_id: ModelIdT,
1351
+ model_id: str,
1223
1352
  response: AsyncContextStreamResponse[DepsT, None]
1224
1353
  | AsyncContextStreamResponse[DepsT, FormattableT],
1225
1354
  content: UserContent,