mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
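A central change in this release is visible in the hunks below: `llm.Model` and related helpers now take a single `"provider/model-name"` string instead of separate `provider` and `model_id` arguments, and the `mirascope.llm.clients` package is replaced by `mirascope.llm.providers` with a provider registry. A minimal before/after sketch, assuming the 2.0.0a4 API exactly as shown in the diffed docstrings (model names are illustrative only):

```python
import mirascope.llm as llm

# 2.0.0a2: provider and model were separate arguments, validated eagerly
# model = llm.Model(provider="openai", model_id="gpt-4o-mini")

# 2.0.0a4: one "provider/model-name" string; the provider is resolved lazily
# from the provider registry when `model.provider` is accessed
model = llm.Model("openai/gpt-5-mini")
message = llm.messages.user("Please recommend a fantasy book.")
response = model.call(messages=[message])
```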
@@ -2,16 +2,22 @@
  
  from __future__ import annotations
  
- from collections.abc import Iterator, Sequence
- from contextlib import contextmanager
- from contextvars import ContextVar
- from typing import TYPE_CHECKING, overload
+ from collections.abc import Sequence
+ from contextvars import ContextVar, Token
+ from types import TracebackType
+ from typing import overload
  from typing_extensions import Unpack
  
- from ..clients import PROVIDERS, get_client
  from ..context import Context, DepsT
  from ..formatting import Format, FormattableT
  from ..messages import Message, UserContent
+ from ..providers import (
+ ModelId,
+ Params,
+ Provider,
+ ProviderId,
+ get_provider_for_model,
+ )
  from ..responses import (
  AsyncContextResponse,
  AsyncContextStreamResponse,
@@ -33,18 +39,10 @@ from ..tools import (
  Toolkit,
  )
  
- if TYPE_CHECKING:
- from ..clients import (
- ModelId,
- Params,
- Provider,
- )
-
-
  MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
  
  
- def get_model_from_context() -> Model | None:
+ def model_from_context() -> Model | None:
  """Get the LLM currently set via context, if any."""
  return MODEL_CONTEXT.get()
  
@@ -68,7 +66,7 @@ class Model:
  
  def recommend_book(genre: str) -> llm.Response:
  # Uses context model if available, otherwise creates default
- model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+ model = llm.use_model("openai/gpt-5-mini")
  message = llm.messages.user(f"Please recommend a book in {genre}.")
  return model.call(messages=[message])
  
@@ -76,7 +74,7 @@ class Model:
  response = recommend_book("fantasy")
  
  # Override with different model
- with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+ with llm.model(provider="anthropic", model_id="anthropic/claude-sonnet-4-5"):
  response = recommend_book("fantasy") # Uses Claude
  ```
  
@@ -87,33 +85,73 @@ class Model:
  
  def recommend_book(genre: str) -> llm.Response:
  # Hardcoded model, cannot be overridden by context
- model = llm.Model(provider="openai", model_id="gpt-4o-mini")
+ model = llm.Model("openai/gpt-5-mini")
  message = llm.messages.user(f"Please recommend a book in {genre}.")
  return model.call(messages=[message])
  ```
  """
  
- provider: Provider
- """The provider being used (e.g. `openai`)."""
-
  model_id: ModelId
- """The model being used (e.g. `gpt-4o-mini`)."""
+ """The model being used (e.g. `"openai/gpt-4o-mini"`)."""
  
  params: Params
  """The default parameters for the model (temperature, max_tokens, etc.)."""
  
  def __init__(
  self,
- provider: Provider,
  model_id: ModelId,
  **params: Unpack[Params],
  ) -> None:
- """Initialize the Model with provider, model_id, and optional params."""
- if provider not in PROVIDERS:
- raise ValueError(f"Unknown provider: {provider}")
- self.provider = provider
+ """Initialize the Model with model_id and optional params."""
+ if "/" not in model_id:
+ raise ValueError(
+ "Invalid model_id format. Expected format: 'provider/model-name' "
+ f"(e.g., 'openai/gpt-4'). Got: '{model_id}'"
+ )
  self.model_id = model_id
  self.params = params
+ self._token_stack: list[Token[Model | None]] = []
+
+ @property
+ def provider(self) -> Provider:
+ """The provider being used (e.g. an `OpenAIProvider`).
+
+ This property dynamically looks up the provider from the registry based on
+ the current model_id. This allows provider overrides via `llm.register_provider()`
+ to take effect even after the model instance is created.
+
+ Raises:
+ NoRegisteredProviderError: If no provider is available for the model_id
+ """
+ return get_provider_for_model(self.model_id)
+
+ @property
+ def provider_id(self) -> ProviderId:
+ """The string id of the provider being used (e.g. `"openai"`).
+
+ This property returns the `id` field of the dynamically resolved provider.
+
+ Raises:
+ NoRegisteredProviderError: If no provider is available for the model_id
+ """
+ return self.provider.id
+
+ def __enter__(self) -> Model:
+ """Enter the context manager, setting this model in context."""
+ token = MODEL_CONTEXT.set(self)
+ self._token_stack.append(token)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ """Exit the context manager, resetting the model context."""
+ if self._token_stack:
+ token = self._token_stack.pop()
+ MODEL_CONTEXT.reset(token)
  
  @overload
  def call(
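The hunk above replaces the stored `provider` attribute with a lazily resolved property and adds `__enter__`/`__exit__` backed by a token stack, so a `Model` instance itself acts as a re-entrant, nestable context manager. A hedged sketch of what the added code appears to allow; the behavior is inferred from the diff, not verified against the release:

```python
import mirascope.llm as llm

gpt = llm.Model("openai/gpt-5-mini")
claude = llm.Model("anthropic/claude-sonnet-4-5")

with gpt:  # __enter__ pushes a ContextVar token; gpt becomes the context model
    with claude:  # nesting pushes another token; claude now takes precedence
        assert llm.use_model("openai/gpt-5-mini") is claude
    # __exit__ pops claude's token, restoring gpt as the context model
    assert llm.use_model("openai/gpt-5-mini") is gpt
```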
@@ -165,7 +203,7 @@ class Model:
  Returns:
  An `llm.Response` object containing the LLM-generated content.
  """
- return get_client(self.provider).call(
+ return self.provider.call(
  model_id=self.model_id,
  messages=messages,
  tools=tools,
@@ -223,7 +261,7 @@ class Model:
  Returns:
  An `llm.AsyncResponse` object containing the LLM-generated content.
  """
- return await get_client(self.provider).call_async(
+ return await self.provider.call_async(
  model_id=self.model_id,
  messages=messages,
  tools=tools,
@@ -281,7 +319,7 @@ class Model:
  Returns:
  An `llm.StreamResponse` object for iterating over the LLM-generated content.
  """
- return get_client(self.provider).stream(
+ return self.provider.stream(
  model_id=self.model_id,
  messages=messages,
  tools=tools,
@@ -293,7 +331,7 @@ class Model:
  async def stream_async(
  self,
  *,
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
  format: None = None,
  ) -> AsyncStreamResponse:
@@ -304,7 +342,7 @@ class Model:
  async def stream_async(
  self,
  *,
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
  format: type[FormattableT] | Format[FormattableT],
  ) -> AsyncStreamResponse[FormattableT]:
@@ -315,7 +353,7 @@ class Model:
  async def stream_async(
  self,
  *,
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
  format: type[FormattableT] | Format[FormattableT] | None,
  ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
@@ -325,7 +363,7 @@ class Model:
  async def stream_async(
  self,
  *,
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
  format: type[FormattableT] | Format[FormattableT] | None = None,
  ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
@@ -339,7 +377,7 @@ class Model:
  Returns:
  An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
  """
- return await get_client(self.provider).stream_async(
+ return await self.provider.stream_async(
  model_id=self.model_id,
  messages=messages,
  tools=tools,
@@ -410,7 +448,7 @@ class Model:
  Returns:
  An `llm.ContextResponse` object containing the LLM-generated content.
  """
- return get_client(self.provider).context_call(
+ return self.provider.context_call(
  ctx=ctx,
  model_id=self.model_id,
  messages=messages,
@@ -482,7 +520,7 @@ class Model:
  Returns:
  An `llm.AsyncContextResponse` object containing the LLM-generated content.
  """
- return await get_client(self.provider).context_call_async(
+ return await self.provider.context_call_async(
  ctx=ctx,
  model_id=self.model_id,
  messages=messages,
@@ -558,7 +596,7 @@ class Model:
  Returns:
  An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
  """
- return get_client(self.provider).context_stream(
+ return self.provider.context_stream(
  ctx=ctx,
  model_id=self.model_id,
  messages=messages,
@@ -572,7 +610,7 @@ class Model:
  self,
  *,
  ctx: Context[DepsT],
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
  | AsyncContextToolkit[DepsT]
  | None = None,
@@ -586,7 +624,7 @@ class Model:
  self,
  *,
  ctx: Context[DepsT],
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
  | AsyncContextToolkit[DepsT]
  | None = None,
@@ -600,7 +638,7 @@ class Model:
  self,
  *,
  ctx: Context[DepsT],
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
  | AsyncContextToolkit[DepsT]
  | None = None,
@@ -616,7 +654,7 @@ class Model:
  self,
  *,
  ctx: Context[DepsT],
- messages: list[Message],
+ messages: Sequence[Message],
  tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
  | AsyncContextToolkit[DepsT]
  | None = None,
@@ -636,7 +674,7 @@ class Model:
  Returns:
  An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
  """
- return await get_client(self.provider).context_stream_async(
+ return await self.provider.context_stream_async(
  ctx=ctx,
  model_id=self.model_id,
  messages=messages,
@@ -696,7 +734,7 @@ class Model:
  Returns:
  A new `llm.Response` object containing the extended conversation.
  """
- return get_client(self.provider).resume(
+ return self.provider.resume(
  model_id=self.model_id,
  response=response,
  content=content,
@@ -754,7 +792,7 @@ class Model:
  Returns:
  A new `llm.AsyncResponse` object containing the extended conversation.
  """
- return await get_client(self.provider).resume_async(
+ return await self.provider.resume_async(
  model_id=self.model_id,
  response=response,
  content=content,
@@ -817,7 +855,7 @@ class Model:
  Returns:
  A new `llm.ContextResponse` object containing the extended conversation.
  """
- return get_client(self.provider).context_resume(
+ return self.provider.context_resume(
  ctx=ctx,
  model_id=self.model_id,
  response=response,
@@ -883,7 +921,7 @@ class Model:
  Returns:
  A new `llm.AsyncContextResponse` object containing the extended conversation.
  """
- return await get_client(self.provider).context_resume_async(
+ return await self.provider.context_resume_async(
  ctx=ctx,
  model_id=self.model_id,
  response=response,
@@ -942,7 +980,7 @@ class Model:
  Returns:
  A new `llm.StreamResponse` object for streaming the extended conversation.
  """
- return get_client(self.provider).resume_stream(
+ return self.provider.resume_stream(
  model_id=self.model_id,
  response=response,
  content=content,
@@ -1000,7 +1038,7 @@ class Model:
  Returns:
  A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
  """
- return await get_client(self.provider).resume_stream_async(
+ return await self.provider.resume_stream_async(
  model_id=self.model_id,
  response=response,
  content=content,
@@ -1069,7 +1107,7 @@ class Model:
  Returns:
  A new `llm.ContextStreamResponse` object for streaming the extended conversation.
  """
- return get_client(self.provider).context_resume_stream(
+ return self.provider.context_resume_stream(
  ctx=ctx,
  model_id=self.model_id,
  response=response,
@@ -1141,7 +1179,7 @@ class Model:
  Returns:
  A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
  """
- return await get_client(self.provider).context_resume_stream_async(
+ return await self.provider.context_resume_stream_async(
  ctx=ctx,
  model_id=self.model_id,
  response=response,
@@ -1150,71 +1188,139 @@ class Model:
  )
  
  
- @contextmanager
  def model(
- *,
- provider: Provider,
  model_id: ModelId,
  **params: Unpack[Params],
- ) -> Iterator[None]:
- """Set a model in context for the duration of the context manager.
+ ) -> Model:
+ """Helper for creating a `Model` instance (which may be used as a context manager).
+
+ This is just an alias for the `Model` constructor, added for convenience.
+
+ This function returns a `Model` instance that implements the context manager protocol.
+ When used with a `with` statement, the model will be set in context and used by both
+ `llm.use_model()` and `llm.call()` within that context. This allows you to override
+ the default model at runtime without modifying function definitions.
+
+ The returned `Model` instance can also be stored and reused:
  
- This context manager sets a model that will be used by `llm.use_model()` calls
- within the context. This allows you to override the default model at runtime.
+ ```python
+ m = llm.model("openai/gpt-4o")
+ # Use directly
+ response = m.call(messages=[...])
+ # Or use as context manager
+ with m:
+ response = recommend_book("fantasy")
+ ```
+
+ When a model is set in context, it completely overrides any model ID or parameters
+ specified in `llm.use_model()` or `llm.call()`. The context model's parameters take
+ precedence, and any unset parameters use default values.
  
  Args:
- provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
- model_id: The specific model identifier for the chosen provider.
+ model_id: A model ID string (e.g., "openai/gpt-4").
  **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
  
+ Returns:
+ A Model instance that can be used as a context manager.
+
  Raises:
  ValueError: If the specified provider is not supported.
  
  Example:
+ With `llm.use_model()`
  
  ```python
  import mirascope.llm as llm
  
  def recommend_book(genre: str) -> llm.Response:
- model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+ model = llm.use_model("openai/gpt-5-mini")
  message = llm.messages.user(f"Please recommend a book in {genre}.")
  return model.call(messages=[message])
  
  # Override the default model at runtime
- with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+ with llm.model("anthropic/claude-sonnet-4-5"):
+ response = recommend_book("fantasy") # Uses Claude instead of GPT
+ ```
+
+ Example:
+ With `llm.call()`
+
+ ```python
+ import mirascope.llm as llm
+
+ @llm.call("openai/gpt-5-mini")
+ def recommend_book(genre: str):
+ return f"Please recommend a {genre} book."
+
+ # Override the decorated model at runtime
+ with llm.model("anthropic/claude-sonnet-4-0"):
  response = recommend_book("fantasy") # Uses Claude instead of GPT
  ```
+
+ Example:
+ Storing and reusing Model instances
+
+ ```python
+ import mirascope.llm as llm
+
+ # Create and store a model
+ m = llm.model("openai/gpt-4o")
+
+ # Use it directly
+ response = m.call(messages=[llm.messages.user("Hello!")])
+
+ # Or use it as a context manager
+ with m:
+ response = recommend_book("fantasy")
+ ```
  """
- token = MODEL_CONTEXT.set(Model(provider, model_id, **params))
- try:
- yield
- finally:
- MODEL_CONTEXT.reset(token)
+ return Model(model_id, **params)
  
  
+ @overload
  def use_model(
- *,
- provider: Provider,
- model_id: ModelId,
+ model: ModelId,
+ **params: Unpack[Params],
+ ) -> Model:
+ """Get the model from context if available, otherwise create a new `Model`.
+
+ This overload accepts a model ID string and allows additional params.
+ """
+ ...
+
+
+ @overload
+ def use_model(
+ model: Model,
+ ) -> Model:
+ """Get the model from context if available, otherwise use the provided `Model`.
+
+ This overload accepts a `Model` instance and does not allow additional params.
+ """
+ ...
+
+
+ def use_model(
+ model: Model | ModelId,
  **params: Unpack[Params],
  ) -> Model:
- """Get the model from context if available, otherwise create a new Model.
+ """Get the model from context if available, otherwise create a new `Model`.
  
  This function checks if a model has been set in the context (via `llm.model()`
- context manager). If a model is found in the context, it returns that model.
- Otherwise, it creates and returns a new `llm.Model` instance with the provided
- arguments as defaults.
+ context manager). If a model is found in the context, it returns that model,
+ ignoring any model ID or parameters passed to this function. Otherwise, it creates
+ and returns a new `llm.Model` instance with the provided arguments.
  
  This allows you to write functions that work with a default model but can be
  overridden at runtime using the `llm.model()` context manager.
  
  Args:
- provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
- model_id: The specific model identifier for the chosen provider.
+ model: A model ID string (e.g., "openai/gpt-4") or a Model instance
  **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
+ Only available when passing a model ID string
  
  Returns:
- An `llm.Model` instance from context or a new instance with the specified settings.
+ An `llm.Model` instance from context (if set) or a new instance with the specified settings.
  
  Raises:
  ValueError: If the specified provider is not supported.
@@ -1225,19 +1331,21 @@ def use_model(
  import mirascope.llm as llm
  
  def recommend_book(genre: str) -> llm.Response:
- model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+ model = llm.use_model("openai/gpt-5-mini")
  message = llm.messages.user(f"Please recommend a book in {genre}.")
  return model.call(messages=[message])
  
- # Uses the default model (gpt-4o-mini)
+ # Uses the default model (gpt-5-mini)
  response = recommend_book("fantasy")
  
  # Override with a different model
- with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+ with llm.model(provider="anthropic", model_id="anthropic/claude-sonnet-4-5"):
  response = recommend_book("fantasy") # Uses Claude instead
  ```
  """
- context_model = get_model_from_context()
+ context_model = model_from_context()
  if context_model is not None:
  return context_model
- return Model(provider, model_id, **params)
+ if isinstance(model, str):
+ return Model(model, **params)
+ return model
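Per the overloads above, `use_model()` now accepts either a model ID string (optionally with params) or an existing `Model` instance, and in both cases a model set in context takes precedence over the arguments. A short sketch of the two call forms, following the overload signatures in the diff (parameter values are illustrative):

```python
import mirascope.llm as llm

def summarize(text: str) -> llm.Response:
    # Form 1: model ID string plus optional params, used when no context model is set
    model = llm.use_model("openai/gpt-5-mini", temperature=0.2)
    return model.call(messages=[llm.messages.user(f"Summarize: {text}")])

# Form 2: pass a pre-built Model instance as the fallback
fallback = llm.Model("anthropic/claude-sonnet-4-5")
model = llm.use_model(fallback)  # returns `fallback` unless a context model is active
```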
@@ -5,29 +5,30 @@ python functions.
  """
  
  from . import _utils
- from .decorator import prompt
- from .protocols import (
+ from .decorator import PromptDecorator, prompt
+ from .prompts import (
  AsyncContextPrompt,
- AsyncContextPromptable,
  AsyncPrompt,
- AsyncPromptable,
  ContextPrompt,
- ContextPromptable,
  Prompt,
- Promptable,
- PromptT,
+ )
+ from .protocols import (
+ AsyncContextMessageTemplate,
+ AsyncMessageTemplate,
+ ContextMessageTemplate,
+ MessageTemplate,
  )
  
  __all__ = [
+ "AsyncContextMessageTemplate",
  "AsyncContextPrompt",
- "AsyncContextPromptable",
+ "AsyncMessageTemplate",
  "AsyncPrompt",
- "AsyncPromptable",
+ "ContextMessageTemplate",
  "ContextPrompt",
- "ContextPromptable",
+ "MessageTemplate",
  "Prompt",
- "PromptT",
- "Promptable",
+ "PromptDecorator",
  "_utils",
  "prompt",
  ]
@@ -1,4 +1,5 @@
  import inspect
+ from collections.abc import Sequence
  from typing_extensions import TypeIs
  
  from ..context import DepsT, _utils as _context_utils
@@ -12,24 +13,26 @@ from ..messages import (
  )
  from ..types import P
  from .protocols import (
- AsyncContextPromptable,
- AsyncPromptable,
- ContextPromptable,
- Promptable,
+ AsyncContextMessageTemplate,
+ AsyncMessageTemplate,
+ ContextMessageTemplate,
+ MessageTemplate,
  )
  
  
  def is_messages(
- messages_or_content: list[Message] | UserContent,
- ) -> TypeIs[list[Message]]:
- if not messages_or_content:
- raise ValueError("Prompt returned empty content")
- return isinstance(messages_or_content, list) and isinstance(
- messages_or_content[0], SystemMessage | UserMessage | AssistantMessage
- )
-
-
- def promote_to_messages(result: list[Message] | UserContent) -> list[Message]:
+ messages_or_content: Sequence[Message] | UserContent,
+ ) -> TypeIs[Sequence[Message]]:
+ if isinstance(messages_or_content, list):
+ if not messages_or_content:
+ raise ValueError("Empty array may not be used as message content")
+ return isinstance(
+ messages_or_content[0], SystemMessage | UserMessage | AssistantMessage
+ )
+ return False
+
+
+ def promote_to_messages(result: Sequence[Message] | UserContent) -> Sequence[Message]:
  """Promote a prompt result to a list of messages.
  
  If the result is already a list of Messages, returns it as-is.
@@ -41,20 +44,20 @@ def promote_to_messages(result: list[Message] | UserContent) -> list[Message]:
  
  
  def is_context_promptable(
- fn: ContextPromptable[P, DepsT]
- | AsyncContextPromptable[P, DepsT]
- | Promptable[P]
- | AsyncPromptable[P],
- ) -> TypeIs[ContextPromptable[P, DepsT] | AsyncContextPromptable[P, DepsT]]:
+ fn: ContextMessageTemplate[P, DepsT]
+ | AsyncContextMessageTemplate[P, DepsT]
+ | MessageTemplate[P]
+ | AsyncMessageTemplate[P],
+ ) -> TypeIs[ContextMessageTemplate[P, DepsT] | AsyncContextMessageTemplate[P, DepsT]]:
  """Type guard to check if a function is a context promptable function."""
  return _context_utils.first_param_is_context(fn)
  
  
  def is_async_promptable(
- fn: ContextPromptable[P, DepsT]
- | AsyncContextPromptable[P, DepsT]
- | Promptable[P]
- | AsyncPromptable[P],
- ) -> TypeIs[AsyncPromptable[P] | AsyncContextPromptable[P, DepsT]]:
+ fn: ContextMessageTemplate[P, DepsT]
+ | AsyncContextMessageTemplate[P, DepsT]
+ | MessageTemplate[P]
+ | AsyncMessageTemplate[P],
+ ) -> TypeIs[AsyncMessageTemplate[P] | AsyncContextMessageTemplate[P, DepsT]]:
  """Type guard to check if a function is an async promptable function."""
  return inspect.iscoroutinefunction(fn)
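For the `_utils` hunks above: `is_messages` now narrows `Sequence[Message] | UserContent`, returns `False` for non-list content such as a plain string, and only raises for an empty list (previously any falsy input, including an empty string, raised "Prompt returned empty content"). A rough illustration of the new behavior, inferred from the diffed code; these are internal helpers, so treat this as a sketch rather than a supported API:

```python
import mirascope.llm as llm
from mirascope.llm.prompts import _utils

# A plain string is UserContent, not a message list: the type guard returns False.
assert _utils.is_messages("Recommend a fantasy book.") is False

# A list of Message objects is recognized and narrowed to Sequence[Message].
messages = [llm.messages.user("Recommend a fantasy book.")]
assert _utils.is_messages(messages) is True

# An empty list is still rejected:
# _utils.is_messages([])  # ValueError: Empty array may not be used as message content
```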