mirascope 2.0.0a1__py3-none-any.whl → 2.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +85 -0
  5. mirascope/api/_generated/client.py +155 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +7 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/health/__init__.py +7 -0
  27. mirascope/api/_generated/health/client.py +96 -0
  28. mirascope/api/_generated/health/raw_client.py +129 -0
  29. mirascope/api/_generated/health/types/__init__.py +8 -0
  30. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  31. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  32. mirascope/api/_generated/reference.md +167 -0
  33. mirascope/api/_generated/traces/__init__.py +55 -0
  34. mirascope/api/_generated/traces/client.py +162 -0
  35. mirascope/api/_generated/traces/raw_client.py +168 -0
  36. mirascope/api/_generated/traces/types/__init__.py +95 -0
  37. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  38. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  39. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  40. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  41. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  42. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  43. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  44. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  45. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  46. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  47. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  48. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  49. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  50. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  51. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  52. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  53. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  54. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  55. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  56. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  57. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  58. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  59. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  60. mirascope/api/_generated/types/__init__.py +21 -0
  61. mirascope/api/_generated/types/http_api_decode_error.py +31 -0
  62. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  63. mirascope/api/_generated/types/issue.py +44 -0
  64. mirascope/api/_generated/types/issue_tag.py +17 -0
  65. mirascope/api/_generated/types/property_key.py +7 -0
  66. mirascope/api/_generated/types/property_key_tag.py +29 -0
  67. mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
  68. mirascope/api/client.py +255 -0
  69. mirascope/api/settings.py +81 -0
  70. mirascope/llm/__init__.py +41 -11
  71. mirascope/llm/calls/calls.py +81 -57
  72. mirascope/llm/calls/decorator.py +121 -115
  73. mirascope/llm/content/__init__.py +3 -2
  74. mirascope/llm/context/_utils.py +19 -6
  75. mirascope/llm/exceptions.py +30 -16
  76. mirascope/llm/formatting/_utils.py +9 -5
  77. mirascope/llm/formatting/format.py +2 -2
  78. mirascope/llm/formatting/from_call_args.py +2 -2
  79. mirascope/llm/messages/message.py +13 -5
  80. mirascope/llm/models/__init__.py +2 -2
  81. mirascope/llm/models/models.py +189 -81
  82. mirascope/llm/prompts/__init__.py +13 -12
  83. mirascope/llm/prompts/_utils.py +27 -24
  84. mirascope/llm/prompts/decorator.py +133 -204
  85. mirascope/llm/prompts/prompts.py +424 -0
  86. mirascope/llm/prompts/protocols.py +25 -59
  87. mirascope/llm/providers/__init__.py +38 -0
  88. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  89. mirascope/llm/providers/anthropic/__init__.py +24 -0
  90. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
  91. mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
  92. mirascope/llm/providers/anthropic/model_id.py +40 -0
  93. mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
  94. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  95. mirascope/llm/{clients → providers}/base/_utils.py +10 -7
  96. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  97. mirascope/llm/providers/google/__init__.py +21 -0
  98. mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
  99. mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
  100. mirascope/llm/providers/google/model_id.py +28 -0
  101. mirascope/llm/providers/google/provider.py +438 -0
  102. mirascope/llm/providers/load_provider.py +48 -0
  103. mirascope/llm/providers/mlx/__init__.py +24 -0
  104. mirascope/llm/providers/mlx/_utils.py +107 -0
  105. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  106. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  107. mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
  108. mirascope/llm/providers/mlx/mlx.py +237 -0
  109. mirascope/llm/providers/mlx/model_id.py +17 -0
  110. mirascope/llm/providers/mlx/provider.py +411 -0
  111. mirascope/llm/providers/model_id.py +16 -0
  112. mirascope/llm/providers/openai/__init__.py +6 -0
  113. mirascope/llm/providers/openai/completions/__init__.py +20 -0
  114. mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
  115. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
  116. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
  117. mirascope/llm/providers/openai/completions/provider.py +456 -0
  118. mirascope/llm/providers/openai/model_id.py +31 -0
  119. mirascope/llm/providers/openai/model_info.py +246 -0
  120. mirascope/llm/providers/openai/provider.py +386 -0
  121. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  122. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
  123. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
  124. mirascope/llm/providers/openai/responses/provider.py +470 -0
  125. mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
  126. mirascope/llm/providers/provider_id.py +13 -0
  127. mirascope/llm/providers/provider_registry.py +167 -0
  128. mirascope/llm/responses/base_response.py +10 -5
  129. mirascope/llm/responses/base_stream_response.py +10 -5
  130. mirascope/llm/responses/response.py +24 -13
  131. mirascope/llm/responses/root_response.py +7 -12
  132. mirascope/llm/responses/stream_response.py +35 -23
  133. mirascope/llm/tools/__init__.py +9 -2
  134. mirascope/llm/tools/_utils.py +12 -3
  135. mirascope/llm/tools/decorator.py +10 -10
  136. mirascope/llm/tools/protocols.py +4 -4
  137. mirascope/llm/tools/tool_schema.py +44 -9
  138. mirascope/llm/tools/tools.py +12 -11
  139. mirascope/ops/__init__.py +156 -0
  140. mirascope/ops/_internal/__init__.py +5 -0
  141. mirascope/ops/_internal/closure.py +1118 -0
  142. mirascope/ops/_internal/configuration.py +126 -0
  143. mirascope/ops/_internal/context.py +76 -0
  144. mirascope/ops/_internal/exporters/__init__.py +26 -0
  145. mirascope/ops/_internal/exporters/exporters.py +342 -0
  146. mirascope/ops/_internal/exporters/processors.py +104 -0
  147. mirascope/ops/_internal/exporters/types.py +165 -0
  148. mirascope/ops/_internal/exporters/utils.py +29 -0
  149. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  150. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  151. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  152. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  153. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  154. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  155. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  156. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  157. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  158. mirascope/ops/_internal/propagation.py +198 -0
  159. mirascope/ops/_internal/protocols.py +51 -0
  160. mirascope/ops/_internal/session.py +139 -0
  161. mirascope/ops/_internal/spans.py +232 -0
  162. mirascope/ops/_internal/traced_calls.py +371 -0
  163. mirascope/ops/_internal/traced_functions.py +394 -0
  164. mirascope/ops/_internal/tracing.py +276 -0
  165. mirascope/ops/_internal/types.py +13 -0
  166. mirascope/ops/_internal/utils.py +75 -0
  167. mirascope/ops/_internal/versioned_calls.py +512 -0
  168. mirascope/ops/_internal/versioned_functions.py +346 -0
  169. mirascope/ops/_internal/versioning.py +303 -0
  170. mirascope/ops/exceptions.py +21 -0
  171. {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +77 -1
  172. mirascope-2.0.0a3.dist-info/RECORD +206 -0
  173. {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
  174. mirascope/graphs/__init__.py +0 -22
  175. mirascope/graphs/finite_state_machine.py +0 -625
  176. mirascope/llm/agents/__init__.py +0 -15
  177. mirascope/llm/agents/agent.py +0 -97
  178. mirascope/llm/agents/agent_template.py +0 -45
  179. mirascope/llm/agents/decorator.py +0 -176
  180. mirascope/llm/calls/base_call.py +0 -33
  181. mirascope/llm/clients/__init__.py +0 -34
  182. mirascope/llm/clients/anthropic/__init__.py +0 -25
  183. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  184. mirascope/llm/clients/google/__init__.py +0 -20
  185. mirascope/llm/clients/google/clients.py +0 -853
  186. mirascope/llm/clients/google/model_ids.py +0 -15
  187. mirascope/llm/clients/openai/__init__.py +0 -25
  188. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  189. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  190. mirascope/llm/clients/openai/completions/clients.py +0 -833
  191. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  192. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  193. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  194. mirascope/llm/clients/openai/responses/clients.py +0 -832
  195. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  196. mirascope/llm/clients/providers.py +0 -175
  197. mirascope-2.0.0a1.dist-info/RECORD +0 -102
  198. /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
  199. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  200. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  201. /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
  202. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  203. /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
  204. /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
  205. {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/provider_registry.py
@@ -0,0 +1,167 @@
+"""Provider registry for managing provider instances and scopes."""
+
+from typing import overload
+
+from ..exceptions import NoRegisteredProviderError
+from .base import Provider
+from .load_provider import load_provider
+from .provider_id import ProviderId
+
+# Global registry mapping scopes to providers
+# Scopes are matched by prefix (longest match wins)
+PROVIDER_REGISTRY: dict[str, Provider] = {}
+
+# Default auto-registration mapping for built-in providers
+# These providers will be automatically registered on first use
+DEFAULT_AUTO_REGISTER_SCOPES: dict[str, ProviderId] = {
+    "anthropic/": "anthropic",
+    "google/": "google",
+    "openai/": "openai",
+    "mlx-community/": "mlx",
+}
+
+
+@overload
+def register_provider(
+    provider: Provider,
+    scope: str | list[str] | None = None,
+) -> Provider:
+    """Register a provider instance with scope(s).
+
+    Args:
+        provider: Provider instance to register.
+        scope: Scope string or list of scopes (e.g., "anthropic/", ["anthropic/", "openai/"]).
+            If None, uses the provider's default_scope.
+    """
+    ...
+
+
+@overload
+def register_provider(
+    provider: ProviderId,
+    scope: str | list[str] | None = None,
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+) -> Provider:
+    """Register a provider by ID with scope(s).
+
+    Args:
+        provider: Provider ID string (e.g., "anthropic", "openai").
+        scope: Scope string or list of scopes (e.g., "anthropic/", ["anthropic/", "openai/"]).
+            If None, uses the provider's default_scope.
+        api_key: API key for authentication.
+        base_url: Base URL for the API.
+    """
+    ...
+
+
+def register_provider(
+    provider: ProviderId | Provider,
+    scope: str | list[str] | None = None,
+    *,
+    api_key: str | None = None,
+    base_url: str | None = None,
+) -> Provider:
+    """Register a provider with scope(s) in the global registry.
+
+    Scopes use prefix matching on model IDs:
+    - "anthropic/" matches "anthropic/*"
+    - "anthropic/claude-4-5" matches "anthropic/claude-4-5*"
+    - "anthropic/claude-4-5-sonnet" matches exactly "anthropic/claude-4-5-sonnet"
+
+    When multiple scopes match a model_id, the longest match wins.
+
+    Args:
+        provider: Either a provider ID string or a provider instance.
+        scope: Scope string or list of scopes for prefix matching on model IDs.
+            If None, uses the provider's default_scope attribute.
+            Can be a single string or a list of strings.
+        api_key: API key for authentication (only used if provider is a string).
+        base_url: Base URL for the API (only used if provider is a string).
+
+    Example:
+        ```python
+        # Register with default scope
+        llm.register_provider("anthropic", api_key="key")
+
+        # Register for specific models
+        llm.register_provider("openai", scope="openai/gpt-4")
+
+        # Register for multiple scopes
+        llm.register_provider("aws-bedrock", scope=["anthropic/", "openai/"])
+
+        # Register a custom instance
+        custom = llm.providers.AnthropicProvider(api_key="team-key")
+        llm.register_provider(custom, scope="anthropic/claude-4-5-sonnet")
+        ```
+    """
+    if isinstance(provider, str):
+        provider = load_provider(provider, api_key=api_key, base_url=base_url)
+
+    if scope is None:
+        scope = provider.default_scope
+
+    scopes = [scope] if isinstance(scope, str) else scope
+    for s in scopes:
+        PROVIDER_REGISTRY[s] = provider
+
+    return provider
+
+
+def get_provider_for_model(model_id: str) -> Provider:
+    """Get the provider for a model_id based on the registry.
+
+    Uses longest-prefix matching to find the most specific provider for the model.
+    If no explicit registration is found, checks for auto-registration defaults
+    and automatically registers the provider on first use.
+
+    Args:
+        model_id: The full model ID (e.g., "anthropic/claude-4-5-sonnet").
+
+    Returns:
+        The provider instance registered for this model.
+
+    Raises:
+        NoRegisteredProviderError: If no provider is registered or available for this model.
+
+    Example:
+        ```python
+        # Assuming providers are registered:
+        # - "anthropic/" -> AnthropicProvider()
+        # - "anthropic/claude-4-5-sonnet" -> CustomProvider()
+
+        provider = get_provider_for_model("anthropic/claude-4-5-sonnet")
+        # Returns CustomProvider (longest match)
+
+        provider = get_provider_for_model("anthropic/claude-3-opus")
+        # Returns AnthropicProvider (matches "anthropic/" prefix)
+
+        # Auto-registration on first use:
+        provider = get_provider_for_model("openai/gpt-4")
+        # Automatically loads and registers OpenAIProvider() for "openai/"
+        ```
+    """
+    # Try explicit registry first (longest match wins)
+    matching_scopes = [
+        scope for scope in PROVIDER_REGISTRY if model_id.startswith(scope)
+    ]
+    if matching_scopes:
+        best_scope = max(matching_scopes, key=len)
+        return PROVIDER_REGISTRY[best_scope]
+
+    # Fall back to auto-registration
+    matching_defaults = [
+        scope for scope in DEFAULT_AUTO_REGISTER_SCOPES if model_id.startswith(scope)
+    ]
+    if matching_defaults:
+        best_scope = max(matching_defaults, key=len)
+        provider_id = DEFAULT_AUTO_REGISTER_SCOPES[best_scope]
+        provider = load_provider(provider_id)
+        # Auto-register for future calls
+        PROVIDER_REGISTRY[best_scope] = provider
+        return provider
+
+    # No provider found
+    raise NoRegisteredProviderError(model_id)
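
The longest-prefix rule above is the heart of the new routing. To make the resolution order concrete, here is a standalone sketch of the same matching logic (stand-in strings take the place of real `Provider` instances, so this runs without mirascope installed):

```python
# Standalone sketch of the registry's longest-prefix resolution.
# Real entries map scope strings to Provider instances, not names.
registry = {
    "anthropic/": "AnthropicProvider",
    "anthropic/claude-4-5-sonnet": "CustomProvider",
}


def resolve(model_id: str) -> str:
    matches = [scope for scope in registry if model_id.startswith(scope)]
    if not matches:
        raise LookupError(f"no provider registered for {model_id!r}")
    return registry[max(matches, key=len)]  # longest match wins


assert resolve("anthropic/claude-4-5-sonnet") == "CustomProvider"  # exact scope
assert resolve("anthropic/claude-3-opus") == "AnthropicProvider"  # prefix fallback
```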
mirascope/llm/responses/base_response.py
@@ -11,7 +11,7 @@ from .finish_reason import FinishReason
 from .root_response import RootResponse
 
 if TYPE_CHECKING:
-    from ..clients import ModelId, Params, Provider
+    from ..providers import ModelId, Params, ProviderId
 
 
 class BaseResponse(RootResponse[ToolkitT, FormattableT]):
@@ -21,8 +21,9 @@ class BaseResponse(RootResponse[ToolkitT, FormattableT]):
         self,
         *,
         raw: Any,  # noqa: ANN401
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         toolkit: ToolkitT,
         format: Format[FormattableT] | None = None,
@@ -34,8 +35,10 @@ class BaseResponse(RootResponse[ToolkitT, FormattableT]):
 
         Args:
             raw: The raw response from the LLM.
-            provider: The provider name (e.g. "anthropic", "openai:completions").
+            provider_id: The provider ID (e.g. "anthropic", "openai").
             model_id: The model identifier that generated the response.
+            provider_model_name: The provider-specific model name. May include
+                provider-specific additional info (like the API mode in "gpt-5:responses").
             params: The params used to generate the response (or None).
             toolkit: Toolkit containing all the tools used to generate the response.
             format: The `Format` for the expected structured output format (or None).
@@ -44,8 +47,9 @@ class BaseResponse(RootResponse[ToolkitT, FormattableT]):
             finish_reason: The reason why the LLM finished generating a response.
         """
         self.raw = raw
-        self.provider = provider
+        self.provider_id = provider_id
         self.model_id = model_id
+        self.provider_model_name = provider_model_name
         self.params = params
         self.toolkit = toolkit
         self.finish_reason = finish_reason
@@ -84,8 +88,9 @@ class BaseResponse(RootResponse[ToolkitT, FormattableT]):
         assistant_message = AssistantMessage(
             content=self.content,
             name=assistant_message.name,
-            provider=assistant_message.provider,
+            provider_id=assistant_message.provider_id,
             model_id=assistant_message.model_id,
+            provider_model_name=assistant_message.provider_model_name,
             raw_message=assistant_message.raw_message,
         )
         self.messages = list(input_messages) + [assistant_message]
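
The same mechanical rename repeats across the rest of the response hierarchy below: the single `provider` field, which previously doubled as an API-mode selector (e.g. "openai:completions"), is split into `provider_id` plus a new `provider_model_name`. A hypothetical before/after sketch for downstream code that reads response metadata (the attribute names come from this diff; the `response` argument is a stand-in):

```python
def describe(response) -> str:
    # 2.0.0a1: response.provider carried provider and API mode together,
    #     e.g. "openai:completions".
    # 2.0.0a3: the concerns are split across three fields:
    #     response.provider_id          e.g. "openai"
    #     response.model_id             e.g. "openai/gpt-5-mini"
    #     response.provider_model_name  e.g. "gpt-5:responses" (may embed API mode)
    return (
        f"{response.provider_id} served {response.model_id}"
        f" via {response.provider_model_name}"
    )
```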
mirascope/llm/responses/base_stream_response.py
@@ -38,7 +38,7 @@ from .streams import (
 )
 
 if TYPE_CHECKING:
-    from ..clients import ModelId, Params, Provider
+    from ..providers import ModelId, Params, ProviderId
 
 
 @dataclass(kw_only=True)
@@ -157,8 +157,9 @@ class BaseStreamResponse(
     def __init__(
         self,
         *,
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         toolkit: ToolkitT,
         format: Format[FormattableT] | None = None,
@@ -168,8 +169,10 @@ class BaseStreamResponse(
         """Initialize the BaseStreamResponse.
 
         Args:
-            provider: The provider name (e.g. "anthropic", "openai:completions").
+            provider_id: The provider ID (e.g. "anthropic", "openai").
             model_id: The model identifier that generated the response.
+            provider_model_name: The provider-specific model name. May include
+                provider-specific additional info (like the API mode in "gpt-5:responses").
             params: The params used to generate the response (or None).
             toolkit: Toolkit containing all the tools used to generate the response.
             format: The `Format` for the expected structured output format (or None).
@@ -179,8 +182,9 @@ class BaseStreamResponse(
                 as the stream is consumed.
         """
 
-        self.provider = provider
+        self.provider_id = provider_id
         self.model_id = model_id
+        self.provider_model_name = provider_model_name
         self.params = params
         self.toolkit = toolkit
         self.format = format
@@ -206,8 +210,9 @@ class BaseStreamResponse(
 
         self._assistant_message = AssistantMessage(
            content=self._content,
-            provider=provider,
+            provider_id=provider_id,
            model_id=model_id,
+            provider_model_name=provider_model_name,
            raw_message=None,
        )
 
mirascope/llm/responses/response.py
@@ -18,11 +18,12 @@ from ..tools import (
     Tool,
     Toolkit,
 )
+from ..types import Jsonable
 from .base_response import BaseResponse
 from .finish_reason import FinishReason
 
 if TYPE_CHECKING:
-    from ..clients import ModelId, Params, Provider
+    from ..providers import ModelId, Params, ProviderId
 
 
 class Response(BaseResponse[Toolkit, FormattableT]):
@@ -32,8 +33,9 @@ class Response(BaseResponse[Toolkit, FormattableT]):
         self,
         *,
         raw: Any,  # noqa: ANN401
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[Tool] | Toolkit | None = None,
         format: Format[FormattableT] | None = None,
@@ -45,8 +47,9 @@ class Response(BaseResponse[Toolkit, FormattableT]):
         toolkit = tools if isinstance(tools, Toolkit) else Toolkit(tools=tools)
         super().__init__(
             raw=raw,
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -55,7 +58,7 @@ class Response(BaseResponse[Toolkit, FormattableT]):
             finish_reason=finish_reason,
         )
 
-    def execute_tools(self) -> Sequence[ToolOutput]:
+    def execute_tools(self) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Returns:
@@ -101,8 +104,9 @@ class AsyncResponse(BaseResponse[AsyncToolkit, FormattableT]):
         self,
         *,
         raw: Any,  # noqa: ANN401
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: Format[FormattableT] | None = None,
@@ -116,8 +120,9 @@ class AsyncResponse(BaseResponse[AsyncToolkit, FormattableT]):
         )
         super().__init__(
             raw=raw,
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -126,7 +131,7 @@ class AsyncResponse(BaseResponse[AsyncToolkit, FormattableT]):
             finish_reason=finish_reason,
         )
 
-    async def execute_tools(self) -> Sequence[ToolOutput]:
+    async def execute_tools(self) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Returns:
@@ -179,8 +184,9 @@ class ContextResponse(
         self,
         *,
         raw: Any,  # noqa: ANN401
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
@@ -196,8 +202,9 @@ class ContextResponse(
         )
         super().__init__(
             raw=raw,
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -206,7 +213,7 @@ class ContextResponse(
             finish_reason=finish_reason,
         )
 
-    def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
+    def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Args:
@@ -265,8 +272,9 @@ class AsyncContextResponse(
         self,
         *,
         raw: Any,  # noqa: ANN401
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
@@ -284,8 +292,9 @@ class AsyncContextResponse(
         )
         super().__init__(
             raw=raw,
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -294,7 +303,9 @@ class AsyncContextResponse(
             finish_reason=finish_reason,
         )
 
-    async def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
+    async def execute_tools(
+        self, ctx: Context[DepsT]
+    ) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Args:
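
Every `execute_tools` variant in this file also narrows its return type from a bare `Sequence[ToolOutput]` to `Sequence[ToolOutput[Jsonable]]`. A standalone sketch of what the explicit parameter buys under a type checker (the `Jsonable` and `ToolOutput` definitions here are stand-ins, not mirascope's own):

```python
from typing import Generic, TypeVar

# Stand-in for mirascope's Jsonable: any JSON-serializable value.
Jsonable = bool | int | float | str | None | list["Jsonable"] | dict[str, "Jsonable"]
T = TypeVar("T")


class ToolOutput(Generic[T]):
    def __init__(self, value: T) -> None:
        self.value = value


# With a bare ToolOutput the value is effectively Any; with ToolOutput[Jsonable]
# the checker knows every executed tool result is JSON-safe and can be
# serialized or appended to a message history without further narrowing.
def first_value(outputs: list[ToolOutput[Jsonable]]) -> Jsonable:
    return outputs[0].value
```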
mirascope/llm/responses/root_response.py
@@ -13,8 +13,8 @@ from . import _utils
 from .finish_reason import FinishReason
 
 if TYPE_CHECKING:
-    from ..clients import ModelId, Params, Provider
     from ..models import Model
+    from ..providers import ModelId, Params, ProviderId
 
 
 class RootResponse(Generic[ToolkitT, FormattableT], ABC):
@@ -23,7 +23,7 @@ class RootResponse(Generic[ToolkitT, FormattableT], ABC):
     raw: Any
     """The raw response from the LLM."""
 
-    provider: "Provider"
+    provider_id: "ProviderId"
     """The provider that generated this response."""
 
     model_id: "ModelId"
@@ -116,7 +116,9 @@ class RootResponse(Generic[ToolkitT, FormattableT], ABC):
             return None
 
         formattable = self.format.formattable
-        if formattable is None or formattable is NoneType:
+        if formattable is None or formattable is NoneType:  # pyright: ignore[reportUnnecessaryComparison]
+            # Note: pyright claims the None comparison is unnecessary, but removing
+            # it introduces type errors.
             return None  # pragma: no cover
 
         if partial:
@@ -165,13 +167,6 @@ class RootResponse(Generic[ToolkitT, FormattableT], ABC):
     @property
     def model(self) -> "Model":
         """A `Model` with parameters matching this response."""
-        from ..models import Model, get_model_from_context
+        from ..models import use_model  # Dynamic import to avoid circular dependency
 
-        if context_model := get_model_from_context():
-            return context_model
-
-        return Model(
-            provider=self.provider,
-            model_id=self.model_id,
-            **self.params,
-        )
+        return use_model(self.model_id, **self.params)
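
The `model` property rewrite is more than a rename: the response no longer consults a context model or rebuilds a `Model` with an explicit provider. It reconstructs the model from the `model_id` alone, so the provider is resolved through the registry's prefix matching shown in `provider_registry.py` above. A minimal sketch of the new call, using only names visible in this diff (the model ID is an example value):

```python
from mirascope.llm.models import use_model

# Rebuild a model equivalent to the one that produced a response. No provider
# argument is needed: the provider is resolved from the model_id's
# "provider/" prefix via the global registry, auto-registering built-in
# providers on first use.
model = use_model("anthropic/claude-4-5-sonnet")
```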
mirascope/llm/responses/stream_response.py
@@ -18,6 +18,7 @@ from ..tools import (
     Tool,
     Toolkit,
 )
+from ..types import Jsonable
 from .base_stream_response import (
     AsyncChunkIterator,
     BaseAsyncStreamResponse,
@@ -26,7 +27,7 @@ from .base_stream_response import (
 )
 
 if TYPE_CHECKING:
-    from ..clients import ModelId, Params, Provider
+    from ..providers import ModelId, Params, ProviderId
 
 
 class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
@@ -76,8 +77,8 @@ class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
        from mirascope import llm
 
        @llm.call(
-            provider="openai:completions",
-            model_id="gpt-4o-mini",
+            provider_id="openai",
+            model_id="openai/gpt-5-mini",
        )
        def answer_question(question: str) -> str:
            return f"Answer this question: {question}"
@@ -93,8 +94,9 @@ class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
     def __init__(
         self,
         *,
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[Tool] | Toolkit | None = None,
         format: Format[FormattableT] | None = None,
@@ -104,8 +106,9 @@ class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
         """Initialize a `StreamResponse`."""
         toolkit = tools if isinstance(tools, Toolkit) else Toolkit(tools=tools)
         super().__init__(
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -113,7 +116,7 @@ class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
             chunk_iterator=chunk_iterator,
         )
 
-    def execute_tools(self) -> Sequence[ToolOutput]:
+    def execute_tools(self) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Returns:
@@ -201,8 +204,8 @@ class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
        from mirascope import llm
 
        @llm.call(
-            provider="openai:completions",
-            model_id="gpt-4o-mini",
+            provider_id="openai",
+            model_id="openai/gpt-5-mini",
        )
        async def answer_question(question: str) -> str:
            return f"Answer this question: {question}"
@@ -218,8 +221,9 @@ class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
     def __init__(
         self,
         *,
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
         format: Format[FormattableT] | None = None,
@@ -231,8 +235,9 @@ class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
             tools if isinstance(tools, AsyncToolkit) else AsyncToolkit(tools=tools)
         )
         super().__init__(
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -240,7 +245,7 @@ class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
             chunk_iterator=chunk_iterator,
         )
 
-    async def execute_tools(self) -> Sequence[ToolOutput]:
+    async def execute_tools(self) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Returns:
@@ -285,7 +290,8 @@
 
 
 class ContextStreamResponse(
-    BaseSyncStreamResponse[ContextToolkit, FormattableT], Generic[DepsT, FormattableT]
+    BaseSyncStreamResponse[ContextToolkit[DepsT], FormattableT],
+    Generic[DepsT, FormattableT],
 ):
     """A `ContextStreamResponse` wraps response content from the LLM with a streaming interface.
 
@@ -333,8 +339,8 @@ class ContextStreamResponse(
        from mirascope import llm
 
        @llm.call(
-            provider="openai:completions",
-            model_id="gpt-4o-mini",
+            provider_id="openai",
+            model_id="openai/gpt-5-mini",
        )
        def answer_question(ctx: llm.Context, question: str) -> str:
            return f"Answer this question: {question}"
@@ -351,8 +357,9 @@ class ContextStreamResponse(
     def __init__(
         self,
         *,
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
@@ -366,8 +373,9 @@ class ContextStreamResponse(
             tools if isinstance(tools, ContextToolkit) else ContextToolkit(tools=tools)
         )
         super().__init__(
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -375,7 +383,7 @@ class ContextStreamResponse(
             chunk_iterator=chunk_iterator,
         )
 
-    def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
+    def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Args:
@@ -426,7 +434,7 @@
 
 
 class AsyncContextStreamResponse(
-    BaseAsyncStreamResponse[AsyncContextToolkit, FormattableT],
+    BaseAsyncStreamResponse[AsyncContextToolkit[DepsT], FormattableT],
     Generic[DepsT, FormattableT],
 ):
     """An `AsyncContextStreamResponse` wraps response content from the LLM with a streaming interface.
@@ -475,8 +483,8 @@ class AsyncContextStreamResponse(
        from mirascope import llm
 
        @llm.call(
-            provider="openai:completions",
-            model_id="gpt-4o-mini",
+            provider_id="openai",
+            model_id="openai/gpt-5-mini",
        )
        async def answer_question(ctx: llm.Context, question: str) -> str:
            return f"Answer this question: {question}"
@@ -493,8 +501,9 @@ class AsyncContextStreamResponse(
     def __init__(
         self,
         *,
-        provider: "Provider",
+        provider_id: "ProviderId",
         model_id: "ModelId",
+        provider_model_name: str,
         params: "Params",
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
@@ -510,8 +519,9 @@ class AsyncContextStreamResponse(
             else AsyncContextToolkit(tools=tools)
         )
         super().__init__(
-            provider=provider,
+            provider_id=provider_id,
             model_id=model_id,
+            provider_model_name=provider_model_name,
             params=params,
             toolkit=toolkit,
             format=format,
@@ -519,7 +529,9 @@ class AsyncContextStreamResponse(
             chunk_iterator=chunk_iterator,
         )
 
-    async def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
+    async def execute_tools(
+        self, ctx: Context[DepsT]
+    ) -> Sequence[ToolOutput[Jsonable]]:
         """Execute and return all of the tool calls in the response.
 
         Args:
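
Note also the two generic-parameterization fixes in this file: `ContextToolkit[DepsT]` and `AsyncContextToolkit[DepsT]` now appear parameterized in the base-class lists. A standalone sketch of why the explicit `[DepsT]` matters (all names here are stand-ins):

```python
from typing import Generic, TypeVar

DepsT = TypeVar("DepsT")
ToolkitT = TypeVar("ToolkitT")


class ContextToolkit(Generic[DepsT]):
    def execute(self, deps: DepsT) -> None: ...


class BaseStream(Generic[ToolkitT]):
    toolkit: ToolkitT


# Before: the bare ContextToolkit erases DepsT, so checkers treat
# response.toolkit.execute(deps) as accepting anything.
class Before(BaseStream[ContextToolkit], Generic[DepsT]): ...


# After: DepsT declared on the response flows through to the toolkit, so
# execute(deps) is checked against the response's actual deps type.
class After(BaseStream[ContextToolkit[DepsT]], Generic[DepsT]): ...
```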